diff --git a/.buildkite/Dockerfile b/.buildkite/Dockerfile index 2bf3886dc..0de3234dc 100644 --- a/.buildkite/Dockerfile +++ b/.buildkite/Dockerfile @@ -12,5 +12,3 @@ WORKDIR /usr/src/app COPY package.json . RUN npm install - -COPY . . diff --git a/.buildkite/Dockerfile-make b/.buildkite/Dockerfile-make index 3805eb0a2..b171f5d03 100644 --- a/.buildkite/Dockerfile-make +++ b/.buildkite/Dockerfile-make @@ -25,6 +25,3 @@ USER ${BUILDER_UID}:${BUILDER_GID} # install dependencies COPY package.json . RUN npm install - -# copy project files -COPY . . diff --git a/.buildkite/make.mjs b/.buildkite/make.mjs index 3026b61f3..ddc91d01f 100644 --- a/.buildkite/make.mjs +++ b/.buildkite/make.mjs @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 */ /* global $ argv */ @@ -123,7 +109,7 @@ async function codegen (args) { await $`rm -rf ${join(import.meta.url, '..', 'src', 'api')}` await $`mkdir ${join(import.meta.url, '..', 'src', 'api')}` await $`cp -R ${join(import.meta.url, '..', '..', 'elastic-client-generator-js', 'output')}/* ${join(import.meta.url, '..', 'src', 'api')}` - await $`mv ${join(import.meta.url, '..', 'src', 'api', 'reference.asciidoc')} ${join(import.meta.url, '..', 'docs', 'reference.asciidoc')}` + await $`mv ${join(import.meta.url, '..', 'src', 'api', 'reference.md')} ${join(import.meta.url, '..', 'docs', 'reference', 'api-reference.md')}` await $`npm run build` // run docs example generation diff --git a/.buildkite/pipeline.yml b/.buildkite/pipeline.yml index c5146fc68..8a7e176b1 100644 --- a/.buildkite/pipeline.yml +++ b/.buildkite/pipeline.yml @@ -1,17 +1,20 @@ --- +agents: + provider: "gcp" + image: family/core-ubuntu-2204 + memory: "8G" + cpu: "2" + steps: - - label: ":elasticsearch: :javascript: ES JavaScript ({{ matrix.nodejs }}) Test Suite: {{ matrix.suite }}" - agents: - provider: "gcp" + - label: ":elasticsearch: :javascript: ES JavaScript ({{ matrix.nodejs }})" env: NODE_VERSION: "{{ matrix.nodejs }}" - TEST_SUITE: "{{ matrix.suite }}" - STACK_VERSION: 8.16.0 + TEST_SUITE: "platinum" + STACK_VERSION: 9.0.0 + GITHUB_TOKEN_PATH: "secret/ci/elastic-elasticsearch-js/github-token" + TEST_ES_STACK: "1" matrix: setup: - suite: - - "free" - - "platinum" nodejs: - "18" - "20" @@ -21,9 +24,6 @@ steps: - wait: ~ continue_on_failure: true - label: ":junit: Test results" - agents: - provider: "gcp" - image: family/core-ubuntu-2204 plugins: - junit-annotate#v2.6.0: artifacts: "junit-output/junit-*.xml" diff --git a/.buildkite/run-client.sh b/.buildkite/run-client.sh index 59ed168e7..872d57812 100755 --- a/.buildkite/run-client.sh +++ b/.buildkite/run-client.sh @@ -10,22 +10,29 @@ export NODE_VERSION=${NODE_VERSION:-18} echo "--- 
:javascript: Building Docker image" docker build \ - --file "$script_path/Dockerfile" \ - --tag elastic/elasticsearch-js \ - --build-arg NODE_VERSION="$NODE_VERSION" \ - . + --file "$script_path/Dockerfile" \ + --tag elastic/elasticsearch-js \ + --build-arg NODE_VERSION="$NODE_VERSION" \ + . -echo "--- :javascript: Running $TEST_SUITE tests" +GITHUB_TOKEN=$(vault read -field=token "$GITHUB_TOKEN_PATH") +export GITHUB_TOKEN + +echo "--- :javascript: Running tests" mkdir -p "$repo/junit-output" docker run \ - --network="${network_name}" \ - --env "TEST_ES_SERVER=${elasticsearch_url}" \ - --env "ELASTIC_PASSWORD=${elastic_password}" \ - --env "TEST_SUITE=${TEST_SUITE}" \ - --env "ELASTIC_USER=elastic" \ - --env "BUILDKITE=true" \ - --volume "$repo/junit-output:/junit-output" \ - --name elasticsearch-js \ - --rm \ - elastic/elasticsearch-js \ - bash -c "npm run test:integration; [ -f ./$TEST_SUITE-report-junit.xml ] && mv ./$TEST_SUITE-report-junit.xml /junit-output/junit-$BUILDKITE_JOB_ID.xml || echo 'No JUnit artifact found'" + --network="${network_name}" \ + --env TEST_ES_STACK \ + --env STACK_VERSION \ + --env GITHUB_TOKEN \ + --env "TEST_ES_SERVER=${elasticsearch_url}" \ + --env "ELASTIC_PASSWORD=${elastic_password}" \ + --env "ELASTIC_USER=elastic" \ + --env "BUILDKITE=true" \ + --volume "/usr/src/app/node_modules" \ + --volume "$repo:/usr/src/app" \ + --volume "$repo/junit-output:/junit-output" \ + --name elasticsearch-js \ + --rm \ + elastic/elasticsearch-js \ + bash -c "npm run test:integration; [ -f ./report-junit.xml ] && mv ./report-junit.xml /junit-output/junit-$BUILDKITE_JOB_ID.xml || echo 'No JUnit artifact found'" diff --git a/.dockerignore b/.dockerignore index a448fae9c..c2031b20f 100644 --- a/.dockerignore +++ b/.dockerignore @@ -6,3 +6,6 @@ elasticsearch lib junit-output .tap +rest-api-spec +yaml-rest-tests +generated-tests diff --git a/.github/workflows/nodejs.yml b/.github/workflows/nodejs.yml index bc73e0713..42074cadc 100644 --- a/.github/workflows/nodejs.yml +++ b/.github/workflows/nodejs.yml @@ -83,6 +83,9 @@ jobs: run: | npm run license-checker + - name: SPDX header check + run: npm run license-header + test-bun: name: Test Bun runs-on: ${{ matrix.os }} diff --git a/.gitignore b/.gitignore index adec49623..07e49ff7b 100644 --- a/.gitignore +++ b/.gitignore @@ -68,3 +68,7 @@ bun.lockb test-results processinfo .tap +rest-api-spec +yaml-rest-tests +generated-tests +schema diff --git a/.npmignore b/.npmignore index 8a921bbd6..3f909d8c7 100644 --- a/.npmignore +++ b/.npmignore @@ -74,3 +74,6 @@ CONTRIBUTING.md src bun.lockb .tap +rest-api-spec +yaml-rest-tests +generated-tests diff --git a/docs/docset.yml b/docs/docset.yml index 27f8dc2d6..cea34c4d5 100644 --- a/docs/docset.yml +++ b/docs/docset.yml @@ -8,482 +8,5 @@ toc: - toc: reference - toc: release-notes subs: - ref: "https://www.elastic.co/guide/en/elasticsearch/reference/current" - ref-bare: "https://www.elastic.co/guide/en/elasticsearch/reference" - ref-8x: "https://www.elastic.co/guide/en/elasticsearch/reference/8.1" - ref-80: "https://www.elastic.co/guide/en/elasticsearch/reference/8.0" - ref-7x: "https://www.elastic.co/guide/en/elasticsearch/reference/7.17" - ref-70: "https://www.elastic.co/guide/en/elasticsearch/reference/7.0" - ref-60: "https://www.elastic.co/guide/en/elasticsearch/reference/6.0" - ref-64: "https://www.elastic.co/guide/en/elasticsearch/reference/6.4" - xpack-ref: "https://www.elastic.co/guide/en/x-pack/6.2" - 
logstash-ref: "https://www.elastic.co/guide/en/logstash/current" - kibana-ref: "https://www.elastic.co/guide/en/kibana/current" - kibana-ref-all: "https://www.elastic.co/guide/en/kibana" - beats-ref-root: "https://www.elastic.co/guide/en/beats" - beats-ref: "https://www.elastic.co/guide/en/beats/libbeat/current" - beats-ref-60: "https://www.elastic.co/guide/en/beats/libbeat/6.0" - beats-ref-63: "https://www.elastic.co/guide/en/beats/libbeat/6.3" - beats-devguide: "https://www.elastic.co/guide/en/beats/devguide/current" - auditbeat-ref: "https://www.elastic.co/guide/en/beats/auditbeat/current" - packetbeat-ref: "https://www.elastic.co/guide/en/beats/packetbeat/current" - metricbeat-ref: "https://www.elastic.co/guide/en/beats/metricbeat/current" - filebeat-ref: "https://www.elastic.co/guide/en/beats/filebeat/current" - functionbeat-ref: "https://www.elastic.co/guide/en/beats/functionbeat/current" - winlogbeat-ref: "https://www.elastic.co/guide/en/beats/winlogbeat/current" - heartbeat-ref: "https://www.elastic.co/guide/en/beats/heartbeat/current" - journalbeat-ref: "https://www.elastic.co/guide/en/beats/journalbeat/current" - ingest-guide: "https://www.elastic.co/guide/en/ingest/current" - fleet-guide: "https://www.elastic.co/guide/en/fleet/current" - apm-guide-ref: "https://www.elastic.co/guide/en/apm/guide/current" - apm-guide-7x: "https://www.elastic.co/guide/en/apm/guide/7.17" - apm-app-ref: "https://www.elastic.co/guide/en/kibana/current" - apm-agents-ref: "https://www.elastic.co/guide/en/apm/agent" - apm-android-ref: "https://www.elastic.co/guide/en/apm/agent/android/current" - apm-py-ref: "https://www.elastic.co/guide/en/apm/agent/python/current" - apm-py-ref-3x: "https://www.elastic.co/guide/en/apm/agent/python/3.x" - apm-node-ref-index: "https://www.elastic.co/guide/en/apm/agent/nodejs" - apm-node-ref: "https://www.elastic.co/guide/en/apm/agent/nodejs/current" - apm-node-ref-1x: "https://www.elastic.co/guide/en/apm/agent/nodejs/1.x" - apm-rum-ref: "https://www.elastic.co/guide/en/apm/agent/rum-js/current" - apm-ruby-ref: "https://www.elastic.co/guide/en/apm/agent/ruby/current" - apm-java-ref: "https://www.elastic.co/guide/en/apm/agent/java/current" - apm-go-ref: "https://www.elastic.co/guide/en/apm/agent/go/current" - apm-dotnet-ref: "https://www.elastic.co/guide/en/apm/agent/dotnet/current" - apm-php-ref: "https://www.elastic.co/guide/en/apm/agent/php/current" - apm-ios-ref: "https://www.elastic.co/guide/en/apm/agent/swift/current" - apm-lambda-ref: "https://www.elastic.co/guide/en/apm/lambda/current" - apm-attacher-ref: "https://www.elastic.co/guide/en/apm/attacher/current" - docker-logging-ref: "https://www.elastic.co/guide/en/beats/loggingplugin/current" - esf-ref: "https://www.elastic.co/guide/en/esf/current" - kinesis-firehose-ref: "https://www.elastic.co/guide/en/kinesis/%7B%7Bkinesis_version%7D%7D" - estc-welcome-current: "https://www.elastic.co/guide/en/starting-with-the-elasticsearch-platform-and-its-solutions/current" - estc-welcome: "https://www.elastic.co/guide/en/starting-with-the-elasticsearch-platform-and-its-solutions/current" - estc-welcome-all: 
"https://www.elastic.co/guide/en/starting-with-the-elasticsearch-platform-and-its-solutions" - hadoop-ref: "https://www.elastic.co/guide/en/elasticsearch/hadoop/current" - stack-ref: "https://www.elastic.co/guide/en/elastic-stack/current" - stack-ref-67: "https://www.elastic.co/guide/en/elastic-stack/6.7" - stack-ref-68: "https://www.elastic.co/guide/en/elastic-stack/6.8" - stack-ref-70: "https://www.elastic.co/guide/en/elastic-stack/7.0" - stack-ref-80: "https://www.elastic.co/guide/en/elastic-stack/8.0" - stack-ov: "https://www.elastic.co/guide/en/elastic-stack-overview/current" - stack-gs: "https://www.elastic.co/guide/en/elastic-stack-get-started/current" - stack-gs-current: "https://www.elastic.co/guide/en/elastic-stack-get-started/current" - javaclient: "https://www.elastic.co/guide/en/elasticsearch/client/java-api/current" - java-api-client: "https://www.elastic.co/guide/en/elasticsearch/client/java-api-client/current" - java-rest: "https://www.elastic.co/guide/en/elasticsearch/client/java-rest/current" - jsclient: "https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current" - jsclient-current: "https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current" - es-ruby-client: "https://www.elastic.co/guide/en/elasticsearch/client/ruby-api/current" - es-dotnet-client: "https://www.elastic.co/guide/en/elasticsearch/client/net-api/current" - es-php-client: "https://www.elastic.co/guide/en/elasticsearch/client/php-api/current" - es-python-client: "https://www.elastic.co/guide/en/elasticsearch/client/python-api/current" - defguide: "https://www.elastic.co/guide/en/elasticsearch/guide/2.x" - painless: "https://www.elastic.co/guide/en/elasticsearch/painless/current" - plugins: "https://www.elastic.co/guide/en/elasticsearch/plugins/current" - plugins-8x: "https://www.elastic.co/guide/en/elasticsearch/plugins/8.1" - plugins-7x: "https://www.elastic.co/guide/en/elasticsearch/plugins/7.17" - plugins-6x: "https://www.elastic.co/guide/en/elasticsearch/plugins/6.8" - glossary: "https://www.elastic.co/guide/en/elastic-stack-glossary/current" - upgrade_guide: "https://www.elastic.co/products/upgrade_guide" - blog-ref: "https://www.elastic.co/blog/" - curator-ref: "https://www.elastic.co/guide/en/elasticsearch/client/curator/current" - curator-ref-current: "https://www.elastic.co/guide/en/elasticsearch/client/curator/current" - metrics-ref: "https://www.elastic.co/guide/en/metrics/current" - metrics-guide: "https://www.elastic.co/guide/en/metrics/guide/current" - logs-ref: "https://www.elastic.co/guide/en/logs/current" - logs-guide: "https://www.elastic.co/guide/en/logs/guide/current" - uptime-guide: "https://www.elastic.co/guide/en/uptime/current" - observability-guide: "https://www.elastic.co/guide/en/observability/current" - observability-guide-all: "https://www.elastic.co/guide/en/observability" - siem-guide: "https://www.elastic.co/guide/en/siem/guide/current" - security-guide: "https://www.elastic.co/guide/en/security/current" - security-guide-all: "https://www.elastic.co/guide/en/security" - endpoint-guide: "https://www.elastic.co/guide/en/endpoint/current" - sql-odbc: 
"https://www.elastic.co/guide/en/elasticsearch/sql-odbc/current" - ecs-ref: "https://www.elastic.co/guide/en/ecs/current" - ecs-logging-ref: "https://www.elastic.co/guide/en/ecs-logging/overview/current" - ecs-logging-go-logrus-ref: "https://www.elastic.co/guide/en/ecs-logging/go-logrus/current" - ecs-logging-go-zap-ref: "https://www.elastic.co/guide/en/ecs-logging/go-zap/current" - ecs-logging-go-zerolog-ref: "https://www.elastic.co/guide/en/ecs-logging/go-zap/current" - ecs-logging-java-ref: "https://www.elastic.co/guide/en/ecs-logging/java/current" - ecs-logging-dotnet-ref: "https://www.elastic.co/guide/en/ecs-logging/dotnet/current" - ecs-logging-nodejs-ref: "https://www.elastic.co/guide/en/ecs-logging/nodejs/current" - ecs-logging-php-ref: "https://www.elastic.co/guide/en/ecs-logging/php/current" - ecs-logging-python-ref: "https://www.elastic.co/guide/en/ecs-logging/python/current" - ecs-logging-ruby-ref: "https://www.elastic.co/guide/en/ecs-logging/ruby/current" - ml-docs: "https://www.elastic.co/guide/en/machine-learning/current" - eland-docs: "https://www.elastic.co/guide/en/elasticsearch/client/eland/current" - eql-ref: "https://eql.readthedocs.io/en/latest/query-guide" - extendtrial: "https://www.elastic.co/trialextension" - wikipedia: "https://en.wikipedia.org/wiki" - forum: "https://discuss.elastic.co/" - xpack-forum: "https://discuss.elastic.co/c/50-x-pack" - security-forum: "https://discuss.elastic.co/c/x-pack/shield" - watcher-forum: "https://discuss.elastic.co/c/x-pack/watcher" - monitoring-forum: "https://discuss.elastic.co/c/x-pack/marvel" - graph-forum: "https://discuss.elastic.co/c/x-pack/graph" - apm-forum: "https://discuss.elastic.co/c/apm" - enterprise-search-ref: "https://www.elastic.co/guide/en/enterprise-search/current" - app-search-ref: "https://www.elastic.co/guide/en/app-search/current" - workplace-search-ref: "https://www.elastic.co/guide/en/workplace-search/current" - enterprise-search-node-ref: "https://www.elastic.co/guide/en/enterprise-search-clients/enterprise-search-node/current" - enterprise-search-php-ref: "https://www.elastic.co/guide/en/enterprise-search-clients/php/current" - enterprise-search-python-ref: "https://www.elastic.co/guide/en/enterprise-search-clients/python/current" - enterprise-search-ruby-ref: "https://www.elastic.co/guide/en/enterprise-search-clients/ruby/current" - elastic-maps-service: "https://maps.elastic.co/" - integrations-docs: "https://docs.elastic.co/en/integrations" - integrations-devguide: "https://www.elastic.co/guide/en/integrations-developer/current" - time-units: "https://www.elastic.co/guide/en/elasticsearch/reference/current/api-conventions.html#time-units" - byte-units: "https://www.elastic.co/guide/en/elasticsearch/reference/current/api-conventions.html#byte-units" - apm-py-ref-v: "https://www.elastic.co/guide/en/apm/agent/python/current" - apm-node-ref-v: "https://www.elastic.co/guide/en/apm/agent/nodejs/current" - apm-rum-ref-v: "https://www.elastic.co/guide/en/apm/agent/rum-js/current" - apm-ruby-ref-v: "https://www.elastic.co/guide/en/apm/agent/ruby/current" - apm-java-ref-v: "https://www.elastic.co/guide/en/apm/agent/java/current" - apm-go-ref-v: 
"https://www.elastic.co/guide/en/apm/agent/go/current" - apm-ios-ref-v: "https://www.elastic.co/guide/en/apm/agent/swift/current" - apm-dotnet-ref-v: "https://www.elastic.co/guide/en/apm/agent/dotnet/current" - apm-php-ref-v: "https://www.elastic.co/guide/en/apm/agent/php/current" - ecloud: "Elastic Cloud" - esf: "Elastic Serverless Forwarder" - ess: "Elasticsearch Service" - ece: "Elastic Cloud Enterprise" - eck: "Elastic Cloud on Kubernetes" - serverless-full: "Elastic Cloud Serverless" - serverless-short: "Serverless" - es-serverless: "Elasticsearch Serverless" - es3: "Elasticsearch Serverless" - obs-serverless: "Elastic Observability Serverless" - sec-serverless: "Elastic Security Serverless" - serverless-docs: "https://docs.elastic.co/serverless" - cloud: "https://www.elastic.co/guide/en/cloud/current" - ess-utm-params: "?page=docs&placement=docs-body" - ess-baymax: "?page=docs&placement=docs-body" - ess-trial: "https://cloud.elastic.co/registration?page=docs&placement=docs-body" - ess-product: "https://www.elastic.co/cloud/elasticsearch-service?page=docs&placement=docs-body" - ess-console: "https://cloud.elastic.co/?page=docs&placement=docs-body" - ess-console-name: "Elasticsearch Service Console" - ess-deployments: "https://cloud.elastic.co/deployments?page=docs&placement=docs-body" - ece-ref: "https://www.elastic.co/guide/en/cloud-enterprise/current" - eck-ref: "https://www.elastic.co/guide/en/cloud-on-k8s/current" - ess-leadin: "You can run Elasticsearch on your own hardware or use our hosted Elasticsearch Service that is available on AWS, GCP, and Azure. https://cloud.elastic.co/registration{ess-utm-params}[Try the Elasticsearch Service for free]." - ess-leadin-short: "Our hosted Elasticsearch Service is available on AWS, GCP, and Azure, and you can https://cloud.elastic.co/registration{ess-utm-params}[try it for free]." - ess-icon: "image:https://doc-icons.s3.us-east-2.amazonaws.com/logo_cloud.svg[link=\"https://cloud.elastic.co/registration%7Bess-utm-params%7D/", title=\"Supported on Elasticsearch Service\"]" - ece-icon: "image:https://doc-icons.s3.us-east-2.amazonaws.com/logo_cloud_ece.svg[link=\"https://cloud.elastic.co/registration%7Bess-utm-params%7D/", title=\"Supported on Elastic Cloud Enterprise\"]" - cloud-only: "This feature is designed for indirect use by https://cloud.elastic.co/registration{ess-utm-params}[Elasticsearch Service], https://www.elastic.co/guide/en/cloud-enterprise/{ece-version-link}[Elastic Cloud Enterprise], and https://www.elastic.co/guide/en/cloud-on-k8s/current[Elastic Cloud on Kubernetes]. Direct use is not supported." - ess-setting-change: "image:https://doc-icons.s3.us-east-2.amazonaws.com/logo_cloud.svg[link=\"{ess-trial}\", title=\"Supported on {ess}\"] indicates a change to a supported https://www.elastic.co/guide/en/cloud/current/ec-add-user-settings.html[user setting] for Elasticsearch Service." - ess-skip-section: "If you use Elasticsearch Service, skip this section. Elasticsearch Service handles these changes for you." - api-cloud: "https://www.elastic.co/docs/api/doc/cloud" - api-ece: "https://www.elastic.co/docs/api/doc/cloud-enterprise" - api-kibana-serverless: "https://www.elastic.co/docs/api/doc/serverless" - es-feature-flag: "This feature is in development and not yet available for use. This documentation is provided for informational purposes only." 
- es-ref-dir: "'{{elasticsearch-root}}/docs/reference'" - apm-app: "APM app" - uptime-app: "Uptime app" - synthetics-app: "Synthetics app" - logs-app: "Logs app" - metrics-app: "Metrics app" - infrastructure-app: "Infrastructure app" - siem-app: "SIEM app" - security-app: "Elastic Security app" - ml-app: "Machine Learning" - dev-tools-app: "Dev Tools" - ingest-manager-app: "Ingest Manager" - stack-manage-app: "Stack Management" - stack-monitor-app: "Stack Monitoring" - alerts-ui: "Alerts and Actions" - rules-ui: "Rules" - rac-ui: "Rules and Connectors" - connectors-ui: "Connectors" - connectors-feature: "Actions and Connectors" - stack-rules-feature: "Stack Rules" - user-experience: "User Experience" - ems: "Elastic Maps Service" - ems-init: "EMS" - hosted-ems: "Elastic Maps Server" - ipm-app: "Index Pattern Management" - ingest-pipelines: "ingest pipelines" - ingest-pipelines-app: "Ingest Pipelines" - ingest-pipelines-cap: "Ingest pipelines" - ls-pipelines: "Logstash pipelines" - ls-pipelines-app: "Logstash Pipelines" - maint-windows: "maintenance windows" - maint-windows-app: "Maintenance Windows" - maint-windows-cap: "Maintenance windows" - custom-roles-app: "Custom Roles" - data-source: "data view" - data-sources: "data views" - data-source-caps: "Data View" - data-sources-caps: "Data Views" - data-source-cap: "Data view" - data-sources-cap: "Data views" - project-settings: "Project settings" - manage-app: "Management" - index-manage-app: "Index Management" - data-views-app: "Data Views" - rules-app: "Rules" - saved-objects-app: "Saved Objects" - tags-app: "Tags" - api-keys-app: "API keys" - transforms-app: "Transforms" - connectors-app: "Connectors" - files-app: "Files" - reports-app: "Reports" - maps-app: "Maps" - alerts-app: "Alerts" - crawler: "Enterprise Search web crawler" - ents: "Enterprise Search" - app-search-crawler: "App Search web crawler" - agent: "Elastic Agent" - agents: "Elastic Agents" - fleet: "Fleet" - fleet-server: "Fleet Server" - integrations-server: "Integrations Server" - ingest-manager: "Ingest Manager" - ingest-management: "ingest management" - package-manager: "Elastic Package Manager" - integrations: "Integrations" - package-registry: "Elastic Package Registry" - artifact-registry: "Elastic Artifact Registry" - aws: "AWS" stack: "Elastic Stack" - xpack: "X-Pack" es: "Elasticsearch" - kib: "Kibana" - esms: "Elastic Stack Monitoring Service" - esms-init: "ESMS" - ls: "Logstash" - beats: "Beats" - auditbeat: "Auditbeat" - filebeat: "Filebeat" - heartbeat: "Heartbeat" - metricbeat: "Metricbeat" - packetbeat: "Packetbeat" - winlogbeat: "Winlogbeat" - functionbeat: "Functionbeat" - journalbeat: "Journalbeat" - es-sql: "Elasticsearch SQL" - esql: "ES|QL" - elastic-agent: "Elastic Agent" - k8s: "Kubernetes" - log-driver-long: "Elastic Logging Plugin for Docker" - security: "X-Pack security" - security-features: "security features" - operator-feature: "operator privileges feature" - es-security-features: "Elasticsearch security features" - stack-security-features: "Elastic Stack security features" - endpoint-sec: "Endpoint Security" - endpoint-cloud-sec: "Endpoint and Cloud Security" - elastic-defend: "Elastic Defend" - elastic-sec: "Elastic Security" - elastic-endpoint: "Elastic Endpoint" - swimlane: "Swimlane" - sn: "ServiceNow" - sn-itsm: "ServiceNow ITSM" - sn-itom: "ServiceNow ITOM" - sn-sir: "ServiceNow SecOps" - jira: "Jira" - ibm-r: "IBM Resilient" - webhook: "Webhook" - webhook-cm: "Webhook - Case Management" - opsgenie: "Opsgenie" - bedrock: "Amazon 
Bedrock" - gemini: "Google Gemini" - hive: "TheHive" - monitoring: "X-Pack monitoring" - monitor-features: "monitoring features" - stack-monitor-features: "Elastic Stack monitoring features" - watcher: "Watcher" - alert-features: "alerting features" - reporting: "X-Pack reporting" - report-features: "reporting features" - graph: "X-Pack graph" - graph-features: "graph analytics features" - searchprofiler: "Search Profiler" - xpackml: "X-Pack machine learning" - ml: "machine learning" - ml-cap: "Machine learning" - ml-init: "ML" - ml-features: "machine learning features" - stack-ml-features: "Elastic Stack machine learning features" - ccr: "cross-cluster replication" - ccr-cap: "Cross-cluster replication" - ccr-init: "CCR" - ccs: "cross-cluster search" - ccs-cap: "Cross-cluster search" - ccs-init: "CCS" - ilm: "index lifecycle management" - ilm-cap: "Index lifecycle management" - ilm-init: "ILM" - dlm: "data lifecycle management" - dlm-cap: "Data lifecycle management" - dlm-init: "DLM" - search-snap: "searchable snapshot" - search-snaps: "searchable snapshots" - search-snaps-cap: "Searchable snapshots" - slm: "snapshot lifecycle management" - slm-cap: "Snapshot lifecycle management" - slm-init: "SLM" - rollup-features: "data rollup features" - ipm: "index pattern management" - ipm-cap: "Index pattern" - rollup: "rollup" - rollup-cap: "Rollup" - rollups: "rollups" - rollups-cap: "Rollups" - rollup-job: "rollup job" - rollup-jobs: "rollup jobs" - rollup-jobs-cap: "Rollup jobs" - dfeed: "datafeed" - dfeeds: "datafeeds" - dfeed-cap: "Datafeed" - dfeeds-cap: "Datafeeds" - ml-jobs: "machine learning jobs" - ml-jobs-cap: "Machine learning jobs" - anomaly-detect: "anomaly detection" - anomaly-detect-cap: "Anomaly detection" - anomaly-job: "anomaly detection job" - anomaly-jobs: "anomaly detection jobs" - anomaly-jobs-cap: "Anomaly detection jobs" - dataframe: "data frame" - dataframes: "data frames" - dataframe-cap: "Data frame" - dataframes-cap: "Data frames" - watcher-transform: "payload transform" - watcher-transforms: "payload transforms" - watcher-transform-cap: "Payload transform" - watcher-transforms-cap: "Payload transforms" - transform: "transform" - transforms: "transforms" - transform-cap: "Transform" - transforms-cap: "Transforms" - dataframe-transform: "transform" - dataframe-transform-cap: "Transform" - dataframe-transforms: "transforms" - dataframe-transforms-cap: "Transforms" - dfanalytics-cap: "Data frame analytics" - dfanalytics: "data frame analytics" - dataframe-analytics-config: "'{dataframe} analytics config'" - dfanalytics-job: "'{dataframe} analytics job'" - dfanalytics-jobs: "'{dataframe} analytics jobs'" - dfanalytics-jobs-cap: "'{dataframe-cap} analytics jobs'" - cdataframe: "continuous data frame" - cdataframes: "continuous data frames" - cdataframe-cap: "Continuous data frame" - cdataframes-cap: "Continuous data frames" - cdataframe-transform: "continuous transform" - cdataframe-transforms: "continuous transforms" - cdataframe-transforms-cap: "Continuous transforms" - ctransform: "continuous transform" - ctransform-cap: "Continuous transform" - ctransforms: "continuous transforms" - ctransforms-cap: "Continuous transforms" - oldetection: "outlier detection" - oldetection-cap: "Outlier detection" - olscore: "outlier score" - olscores: "outlier scores" - fiscore: "feature influence score" - evaluatedf-api: "evaluate {dataframe} analytics API" - evaluatedf-api-cap: "Evaluate {dataframe} analytics API" - binarysc: "binary soft classification" - binarysc-cap: "Binary soft 
classification" - regression: "regression" - regression-cap: "Regression" - reganalysis: "regression analysis" - reganalysis-cap: "Regression analysis" - depvar: "dependent variable" - feature-var: "feature variable" - feature-vars: "feature variables" - feature-vars-cap: "Feature variables" - classification: "classification" - classification-cap: "Classification" - classanalysis: "classification analysis" - classanalysis-cap: "Classification analysis" - infer-cap: "Inference" - infer: "inference" - lang-ident-cap: "Language identification" - lang-ident: "language identification" - data-viz: "Data Visualizer" - file-data-viz: "File Data Visualizer" - feat-imp: "feature importance" - feat-imp-cap: "Feature importance" - nlp: "natural language processing" - nlp-cap: "Natural language processing" - apm-agent: "APM agent" - apm-go-agent: "Elastic APM Go agent" - apm-go-agents: "Elastic APM Go agents" - apm-ios-agent: "Elastic APM iOS agent" - apm-ios-agents: "Elastic APM iOS agents" - apm-java-agent: "Elastic APM Java agent" - apm-java-agents: "Elastic APM Java agents" - apm-dotnet-agent: "Elastic APM .NET agent" - apm-dotnet-agents: "Elastic APM .NET agents" - apm-node-agent: "Elastic APM Node.js agent" - apm-node-agents: "Elastic APM Node.js agents" - apm-php-agent: "Elastic APM PHP agent" - apm-php-agents: "Elastic APM PHP agents" - apm-py-agent: "Elastic APM Python agent" - apm-py-agents: "Elastic APM Python agents" - apm-ruby-agent: "Elastic APM Ruby agent" - apm-ruby-agents: "Elastic APM Ruby agents" - apm-rum-agent: "Elastic APM Real User Monitoring (RUM) JavaScript agent" - apm-rum-agents: "Elastic APM RUM JavaScript agents" - apm-lambda-ext: "Elastic APM AWS Lambda extension" - project-monitors: "project monitors" - project-monitors-cap: "Project monitors" - private-location: "Private Location" - private-locations: "Private Locations" - pwd: "YOUR_PASSWORD" - esh: "ES-Hadoop" - default-dist: "default distribution" - oss-dist: "OSS-only distribution" - observability: "Observability" - api-request-title: "Request" - api-prereq-title: "Prerequisites" - api-description-title: "Description" - api-path-parms-title: "Path parameters" - api-query-parms-title: "Query parameters" - api-request-body-title: "Request body" - api-response-codes-title: "Response codes" - api-response-body-title: "Response body" - api-example-title: "Example" - api-examples-title: "Examples" - api-definitions-title: "Properties" - multi-arg: "†footnoteref:[multi-arg,This parameter accepts multiple arguments.]" - multi-arg-ref: "†footnoteref:[multi-arg]" - yes-icon: "image:https://doc-icons.s3.us-east-2.amazonaws.com/icon-yes.png[Yes,20,15]" - no-icon: "image:https://doc-icons.s3.us-east-2.amazonaws.com/icon-no.png[No,20,15]" - es-repo: "https://github.com/elastic/elasticsearch/" - es-issue: "https://github.com/elastic/elasticsearch/issues/" - es-pull: "https://github.com/elastic/elasticsearch/pull/" - es-commit: "https://github.com/elastic/elasticsearch/commit/" - kib-repo: "https://github.com/elastic/kibana/" - kib-issue: "https://github.com/elastic/kibana/issues/" - kibana-issue: "'{kib-repo}issues/'" - kib-pull: "https://github.com/elastic/kibana/pull/" - kibana-pull: "'{kib-repo}pull/'" - kib-commit: "https://github.com/elastic/kibana/commit/" - ml-repo: "https://github.com/elastic/ml-cpp/" - ml-issue: "https://github.com/elastic/ml-cpp/issues/" - ml-pull: "https://github.com/elastic/ml-cpp/pull/" - ml-commit: 
"https://github.com/elastic/ml-cpp/commit/" - apm-repo: "https://github.com/elastic/apm-server/" - apm-issue: "https://github.com/elastic/apm-server/issues/" - apm-pull: "https://github.com/elastic/apm-server/pull/" - kibana-blob: "https://github.com/elastic/kibana/blob/current/" - apm-get-started-ref: "https://www.elastic.co/guide/en/apm/get-started/current" - apm-server-ref: "https://www.elastic.co/guide/en/apm/server/current" - apm-server-ref-v: "https://www.elastic.co/guide/en/apm/server/current" - apm-server-ref-m: "https://www.elastic.co/guide/en/apm/server/master" - apm-server-ref-62: "https://www.elastic.co/guide/en/apm/server/6.2" - apm-server-ref-64: "https://www.elastic.co/guide/en/apm/server/6.4" - apm-server-ref-70: "https://www.elastic.co/guide/en/apm/server/7.0" - apm-overview-ref-v: "https://www.elastic.co/guide/en/apm/get-started/current" - apm-overview-ref-70: "https://www.elastic.co/guide/en/apm/get-started/7.0" - apm-overview-ref-m: "https://www.elastic.co/guide/en/apm/get-started/master" - infra-guide: "https://www.elastic.co/guide/en/infrastructure/guide/current" - a-data-source: "a data view" - icon-bug: "pass:[]" - icon-checkInCircleFilled: "pass:[]" - icon-warningFilled: "pass:[]" diff --git a/docs/examples/proxy/api/autocomplete.js b/docs/examples/proxy/api/autocomplete.js index fb18298cf..fdd70e11f 100644 --- a/docs/examples/proxy/api/autocomplete.js +++ b/docs/examples/proxy/api/autocomplete.js @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 */ // IMPORTANT: this is not a production ready code & purely for demonstration purposes, diff --git a/docs/examples/proxy/api/delete.js b/docs/examples/proxy/api/delete.js index b76108428..66de08635 100644 --- a/docs/examples/proxy/api/delete.js +++ b/docs/examples/proxy/api/delete.js @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. 
See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 */ // IMPORTANT: this is not a production ready code & purely for demonstration purposes, diff --git a/docs/examples/proxy/api/index.js b/docs/examples/proxy/api/index.js index 901139713..446ba6757 100644 --- a/docs/examples/proxy/api/index.js +++ b/docs/examples/proxy/api/index.js @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 */ // IMPORTANT: this is not a production ready code & purely for demonstration purposes, diff --git a/docs/examples/proxy/api/search.js b/docs/examples/proxy/api/search.js index 8659e08f4..116ef0676 100644 --- a/docs/examples/proxy/api/search.js +++ b/docs/examples/proxy/api/search.js @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 */ // IMPORTANT: this is not a production ready code & purely for demonstration purposes, diff --git a/docs/examples/proxy/utils/authorize.js b/docs/examples/proxy/utils/authorize.js index 97bb9c4b5..74370a5ce 100644 --- a/docs/examples/proxy/utils/authorize.js +++ b/docs/examples/proxy/utils/authorize.js @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 */ // IMPORTANT: this is not a production ready code & purely for demonstration purposes, diff --git a/docs/examples/proxy/utils/prepare-elasticsearch.js b/docs/examples/proxy/utils/prepare-elasticsearch.js index bf833f0c2..6850aaae4 100644 --- a/docs/examples/proxy/utils/prepare-elasticsearch.js +++ b/docs/examples/proxy/utils/prepare-elasticsearch.js @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 */ 'use strict' diff --git a/docs/reference.asciidoc b/docs/reference.asciidoc new file mode 100644 index 000000000..b038b633d --- /dev/null +++ b/docs/reference.asciidoc @@ -0,0 +1,16115 @@ +[[api-reference]] +//////// +=========================================================================================================================== +|| || +|| || +|| || +|| ██████╗ ███████╗ █████╗ ██████╗ ███╗ ███╗███████╗ || +|| ██╔══██╗██╔════╝██╔══██╗██╔══██╗████╗ ████║██╔════╝ || +|| ██████╔╝█████╗ ███████║██║ ██║██╔████╔██║█████╗ || +|| ██╔══██╗██╔══╝ ██╔══██║██║ ██║██║╚██╔╝██║██╔══╝ || +|| ██║ ██║███████╗██║ ██║██████╔╝██║ ╚═╝ ██║███████╗ || +|| ╚═╝ ╚═╝╚══════╝╚═╝ ╚═╝╚═════╝ ╚═╝ ╚═╝╚══════╝ || +|| || +|| || +|| This file is autogenerated, DO NOT send pull requests that changes this file directly. || +|| You should update the script that does the generation, which can be found in: || +|| https://github.com/elastic/elastic-client-generator-js || +|| || +|| You can run the script with the following command: || +|| npm run elasticsearch -- --version || +|| || +|| || +|| || +=========================================================================================================================== +//////// +== API Reference + +[discrete] +=== bulk +Bulk index or delete documents. +Perform multiple `index`, `create`, `delete`, and `update` actions in a single request. +This reduces overhead and can greatly increase indexing speed. + +If the Elasticsearch security features are enabled, you must have the following index privileges for the target data stream, index, or index alias: + +* To use the `create` action, you must have the `create_doc`, `create`, `index`, or `write` index privilege. Data streams support only the `create` action. 
+* To use the `index` action, you must have the `create`, `index`, or `write` index privilege. +* To use the `delete` action, you must have the `delete` or `write` index privilege. +* To use the `update` action, you must have the `index` or `write` index privilege. +* To automatically create a data stream or index with a bulk API request, you must have the `auto_configure`, `create_index`, or `manage` index privilege. +* To make the result of a bulk operation visible to search using the `refresh` parameter, you must have the `maintenance` or `manage` index privilege. + +Automatic data stream creation requires a matching index template with data stream enabled. + +The actions are specified in the request body using a newline delimited JSON (NDJSON) structure: + +---- +action_and_meta_data\n +optional_source\n +action_and_meta_data\n +optional_source\n +.... +action_and_meta_data\n +optional_source\n +---- + +The `index` and `create` actions expect a source on the next line and have the same semantics as the `op_type` parameter in the standard index API. +A `create` action fails if a document with the same ID already exists in the target. +An `index` action adds or replaces a document as necessary. + +NOTE: Data streams support only the `create` action. +To update or delete a document in a data stream, you must target the backing index containing the document. + +An `update` action expects that the partial doc, upsert, and script and its options are specified on the next line. + +A `delete` action does not expect a source on the next line and has the same semantics as the standard delete API. + +NOTE: The final line of data must end with a newline character (`\n`). +Each newline character may be preceded by a carriage return (`\r`). +When sending NDJSON data to the `_bulk` endpoint, use a `Content-Type` header of `application/json` or `application/x-ndjson`. +Because this format uses literal newline characters (`\n`) as delimiters, make sure that the JSON actions and sources are not pretty printed. + +If you provide a target in the request path, it is used for any actions that don't explicitly specify an `_index` argument. + +A note on the format: the idea here is to make processing as fast as possible. +As some of the actions are redirected to other shards on other nodes, only `action_meta_data` is parsed on the receiving node side. + +Client libraries using this protocol should strive to do something similar on the client side, and reduce buffering as much as possible. + +There is no "correct" number of actions to perform in a single bulk request. +Experiment with different settings to find the optimal size for your particular workload. +Note that Elasticsearch limits the maximum size of an HTTP request to 100mb by default, so clients must ensure that no request exceeds this size. +It is not possible to index a single document that exceeds the size limit, so you must pre-process any such documents into smaller pieces before sending them to Elasticsearch. +For instance, split documents into pages or chapters before indexing them, or store raw binary data in a system outside Elasticsearch and replace the raw data with a link to the external system in the documents that you send to Elasticsearch.
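+
+As an illustrative sketch only (the index name, IDs, and field values below are placeholders), the same action/source pairs can be passed to the JavaScript client as a flat `operations` array:
+
+[source,ts]
+----
+// Each action object is immediately followed by its source document,
+// mirroring the NDJSON action_and_meta_data / optional_source pairs.
+const response = await client.bulk({
+  operations: [
+    { index: { _index: 'my-index', _id: '1' } }, // placeholder index and ID
+    { field1: 'value1' },
+    { delete: { _index: 'my-index', _id: '2' } } // delete actions take no source line
+  ]
+})
+if (response.errors) {
+  console.log(response.items) // per-action results, including any failures
+}
+----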
+ +**Client support for bulk requests** + +Some of the officially supported clients provide helpers to assist with bulk requests and reindexing: + +* Go: Check out `esutil.BulkIndexer` +* Perl: Check out `Search::Elasticsearch::Client::5_0::Bulk` and `Search::Elasticsearch::Client::5_0::Scroll` +* Python: Check out `elasticsearch.helpers.*` +* JavaScript: Check out `client.helpers.*` +* .NET: Check out `BulkAllObservable` +* PHP: Check out bulk indexing. + +**Submitting bulk requests with cURL** + +If you're providing text file input to `curl`, you must use the `--data-binary` flag instead of plain `-d`. +The latter doesn't preserve newlines. For example: + +---- +$ cat requests +{ "index" : { "_index" : "test", "_id" : "1" } } +{ "field1" : "value1" } +$ curl -s -H "Content-Type: application/x-ndjson" -XPOST localhost:9200/_bulk --data-binary "@requests"; echo +{"took":7, "errors": false, "items":[{"index":{"_index":"test","_id":"1","_version":1,"result":"created","forced_refresh":false}}]} +---- + +**Optimistic concurrency control** + +Each `index` and `delete` action within a bulk API call may include the `if_seq_no` and `if_primary_term` parameters in their respective action and meta data lines. +The `if_seq_no` and `if_primary_term` parameters control how operations are run, based on the last modification to existing documents. See Optimistic concurrency control for more details. + +**Versioning** + +Each bulk item can include the version value using the `version` field. +It automatically follows the behavior of the index or delete operation based on the `_version` mapping. +It also supports the `version_type`. + +**Routing** + +Each bulk item can include the routing value using the `routing` field. +It automatically follows the behavior of the index or delete operation based on the `_routing` mapping. + +NOTE: Data streams do not support custom routing unless they were created with the `allow_custom_routing` setting enabled in the template. + +**Wait for active shards** + +When making bulk calls, you can set the `wait_for_active_shards` parameter to require a minimum number of shard copies to be active before starting to process the bulk request. + +**Refresh** + +Control when the changes made by this request are visible to search. + +NOTE: Only the shards that receive the bulk request will be affected by refresh. +Imagine a `_bulk?refresh=wait_for` request with three documents in it that happen to be routed to different shards in an index with five shards. +The request will only wait for those three shards to refresh. +The other two shards that make up the index do not participate in the `_bulk` request at all. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-bulk[Endpoint documentation] +[source,ts] +---- +client.bulk({ ... }) +---- +[discrete] +==== Arguments + +* *Request (object):* +** *`index` (Optional, string)*: The name of the data stream, index, or index alias to perform bulk actions on. +** *`operations` (Optional, { index, create, update, delete } | { detect_noop, doc, doc_as_upsert, script, scripted_upsert, _source, upsert } | object[])* +** *`include_source_on_error` (Optional, boolean)*: Whether to include the document source in the error message in case of parsing errors. +** *`list_executed_pipelines` (Optional, boolean)*: If `true`, the response will include the ingest pipelines that were run for each index or create. +** *`pipeline` (Optional, string)*: The pipeline identifier to use to preprocess incoming documents.
If the index has a default ingest pipeline specified, setting the value to `_none` turns off the default ingest pipeline for this request. If a final pipeline is configured, it will always run regardless of the value of this parameter. +** *`refresh` (Optional, Enum(true | false | "wait_for"))*: If `true`, Elasticsearch refreshes the affected shards to make this operation visible to search. If `wait_for`, wait for a refresh to make this operation visible to search. If `false`, do nothing with refreshes. Valid values: `true`, `false`, `wait_for`. +** *`routing` (Optional, string)*: A custom value that is used to route operations to a specific shard. +** *`_source` (Optional, boolean | string | string[])*: Indicates whether to return the `_source` field (`true` or `false`) or contains a list of fields to return. +** *`_source_excludes` (Optional, string | string[])*: A list of source fields to exclude from the response. You can also use this parameter to exclude fields from the subset specified in `_source_includes` query parameter. If the `_source` parameter is `false`, this parameter is ignored. +** *`_source_includes` (Optional, string | string[])*: A list of source fields to include in the response. If this parameter is specified, only these source fields are returned. You can exclude fields from this subset using the `_source_excludes` query parameter. If the `_source` parameter is `false`, this parameter is ignored. +** *`timeout` (Optional, string | -1 | 0)*: The period each action waits for the following operations: automatic index creation, dynamic mapping updates, and waiting for active shards. The default is `1m` (one minute), which guarantees Elasticsearch waits for at least the timeout before failing. The actual wait time could be longer, particularly when multiple waits occur. +** *`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))*: The number of shard copies that must be active before proceeding with the operation. Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). The default is `1`, which waits for each primary shard to be active. +** *`require_alias` (Optional, boolean)*: If `true`, the request's actions must target an index alias. +** *`require_data_stream` (Optional, boolean)*: If `true`, the request's actions must target a data stream (existing or to be created). + +[discrete] +=== clear_scroll +Clear a scrolling search. +Clear the search context and results for a scrolling search. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-clear-scroll[Endpoint documentation] +[source,ts] +---- +client.clearScroll({ ... }) +---- +[discrete] +==== Arguments + +* *Request (object):* +** *`scroll_id` (Optional, string | string[])*: A list of scroll IDs to clear. To clear all scroll IDs, use `_all`. IMPORTANT: Scroll IDs can be long. It is recommended to specify scroll IDs in the request body parameter. + +[discrete] +=== close_point_in_time +Close a point in time. +A point in time must be opened explicitly before being used in search requests. +The `keep_alive` parameter tells Elasticsearch how long it should persist. +A point in time is automatically closed when the `keep_alive` period has elapsed. +However, keeping points in time has a cost; close them as soon as they are no longer required for search requests. 
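+
+As a minimal sketch of this lifecycle (assuming the client's `openPointInTime` helper and a placeholder index named `my-index`):
+
+[source,ts]
+----
+// Open a point in time, search against it, then close it once it is no longer needed.
+const pit = await client.openPointInTime({ index: 'my-index', keep_alive: '1m' })
+const result = await client.search({
+  pit: { id: pit.id, keep_alive: '1m' },
+  query: { match_all: {} }
+})
+console.log(result.hits.hits.length)
+await client.closePointInTime({ id: pit.id })
+----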
+ +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-open-point-in-time[Endpoint documentation] +[source,ts] +---- +client.closePointInTime({ id }) +---- +[discrete] +==== Arguments + +* *Request (object):* +** *`id` (string)*: The ID of the point-in-time. + +[discrete] +=== count +Count search results. +Get the number of documents matching a query. + +The query can be provided either by using a simple query string as a parameter, or by defining Query DSL within the request body. +The query is optional. When no query is provided, the API uses `match_all` to count all the documents. + +The count API supports multi-target syntax. You can run a single count API search across multiple data streams and indices. + +The operation is broadcast across all shards. +For each shard ID group, a replica is chosen and the search is run against it. +This means that replicas increase the scalability of the count. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-count[Endpoint documentation] +[source,ts] +---- +client.count({ ... }) +---- +[discrete] +==== Arguments + +* *Request (object):* +** *`index` (Optional, string | string[])*: A list of data streams, indices, and aliases to search. It supports wildcards (`*`). To search all data streams and indices, omit this parameter or use `*` or `_all`. +** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Defines the search query using Query DSL. A request body query cannot be used with the `q` query string parameter. +** *`allow_no_indices` (Optional, boolean)*: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. +** *`analyzer` (Optional, string)*: The analyzer to use for the query string. This parameter can be used only when the `q` query string parameter is specified. +** *`analyze_wildcard` (Optional, boolean)*: If `true`, wildcard and prefix queries are analyzed. This parameter can be used only when the `q` query string parameter is specified. +** *`default_operator` (Optional, Enum("and" | "or"))*: The default operator for query string query: `AND` or `OR`. This parameter can be used only when the `q` query string parameter is specified. +** *`df` (Optional, string)*: The field to use as a default when no field prefix is given in the query string. This parameter can be used only when the `q` query string parameter is specified. +** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: The type of index that wildcard patterns can match. 
If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. It supports a list of values, such as `open,hidden`. +** *`ignore_throttled` (Optional, boolean)*: If `true`, concrete, expanded, or aliased indices are ignored when frozen. +** *`ignore_unavailable` (Optional, boolean)*: If `false`, the request returns an error if it targets a missing or closed index. +** *`lenient` (Optional, boolean)*: If `true`, format-based query failures (such as providing text to a numeric field) in the query string will be ignored. This parameter can be used only when the `q` query string parameter is specified. +** *`min_score` (Optional, number)*: The minimum `_score` value that documents must have to be included in the result. +** *`preference` (Optional, string)*: The node or shard the operation should be performed on. By default, it is random. +** *`routing` (Optional, string)*: A custom value used to route operations to a specific shard. +** *`terminate_after` (Optional, number)*: The maximum number of documents to collect for each shard. If a query reaches this limit, Elasticsearch terminates the query early. Elasticsearch collects documents before sorting. IMPORTANT: Use with caution. Elasticsearch applies this parameter to each shard handling the request. When possible, let Elasticsearch perform early termination automatically. Avoid specifying this parameter for requests that target data streams with backing indices across multiple data tiers. +** *`q` (Optional, string)*: The query in Lucene query string syntax. This parameter cannot be used with a request body. + +[discrete] +=== create +Create a new document in the index. + +You can index a new JSON document with the `/<target>/_doc/` or `/<target>/_create/<_id>` APIs. +Using `_create` guarantees that the document is indexed only if it does not already exist. +It returns a 409 response when a document with the same ID already exists in the index. +To update an existing document, you must use the `/<target>/_doc/` API. + +If the Elasticsearch security features are enabled, you must have the following index privileges for the target data stream, index, or index alias: + +* To add a document using the `PUT /<target>/_create/<_id>` or `POST /<target>/_create/<_id>` request formats, you must have the `create_doc`, `create`, `index`, or `write` index privilege. +* To automatically create a data stream or index with this API request, you must have the `auto_configure`, `create_index`, or `manage` index privilege. + +Automatic data stream creation requires a matching index template with data stream enabled. + +**Automatically create data streams and indices** + +If the request's target doesn't exist and matches an index template with a `data_stream` definition, the index operation automatically creates the data stream. + +If the target doesn't exist and doesn't match a data stream template, the operation automatically creates the index and applies any matching index templates. + +NOTE: Elasticsearch includes several built-in index templates. To avoid naming collisions with these templates, refer to index pattern documentation. + +If no mapping exists, the index operation creates a dynamic mapping. +By default, new fields and objects are automatically added to the mapping if needed. + +Automatic index creation is controlled by the `action.auto_create_index` setting. +If it is `true`, any index can be created automatically.
+You can modify this setting to explicitly allow or block automatic creation of indices that match specified patterns or set it to `false` to turn off automatic index creation entirely. +Specify a list of patterns you want to allow or prefix each pattern with `+` or `-` to indicate whether it should be allowed or blocked. +When a list is specified, the default behaviour is to disallow. + +NOTE: The `action.auto_create_index` setting affects the automatic creation of indices only. +It does not affect the creation of data streams. + +**Routing** + +By default, shard placement — or routing — is controlled by using a hash of the document's ID value. +For more explicit control, the value fed into the hash function used by the router can be directly specified on a per-operation basis using the `routing` parameter. + +When setting up explicit mapping, you can also use the `_routing` field to direct the index operation to extract the routing value from the document itself. +This does come at the (very minimal) cost of an additional document parsing pass. +If the `_routing` mapping is defined and set to be required, the index operation will fail if no routing value is provided or extracted. + +NOTE: Data streams do not support custom routing unless they were created with the `allow_custom_routing` setting enabled in the template. + +**Distributed** + +The index operation is directed to the primary shard based on its route and performed on the actual node containing this shard. +After the primary shard completes the operation, if needed, the update is distributed to applicable replicas. + +**Active shards** + +To improve the resiliency of writes to the system, indexing operations can be configured to wait for a certain number of active shard copies before proceeding with the operation. +If the requisite number of active shard copies are not available, then the write operation must wait and retry, until either the requisite shard copies have started or a timeout occurs. +By default, write operations only wait for the primary shards to be active before proceeding (that is to say `wait_for_active_shards` is `1`). +This default can be overridden in the index settings dynamically by setting `index.write.wait_for_active_shards`. +To alter this behavior per operation, use the `wait_for_active_shards request` parameter. + +Valid values are all or any positive integer up to the total number of configured copies per shard in the index (which is `number_of_replicas`+1). +Specifying a negative value or a number greater than the number of shard copies will throw an error. + +For example, suppose you have a cluster of three nodes, A, B, and C and you create an index index with the number of replicas set to 3 (resulting in 4 shard copies, one more copy than there are nodes). +If you attempt an indexing operation, by default the operation will only ensure the primary copy of each shard is available before proceeding. +This means that even if B and C went down and A hosted the primary shard copies, the indexing operation would still proceed with only one copy of the data. +If `wait_for_active_shards` is set on the request to `3` (and all three nodes are up), the indexing operation will require 3 active shard copies before proceeding. +This requirement should be met because there are 3 active nodes in the cluster, each one holding a copy of the shard. 
+However, if you set `wait_for_active_shards` to `all` (or to `4`, which is the same in this situation), the indexing operation will not proceed as you do not have all 4 copies of each shard active in the index. +The operation will time out unless a new node is brought up in the cluster to host the fourth copy of the shard. + +It is important to note that this setting greatly reduces the chances of the write operation not writing to the requisite number of shard copies, but it does not completely eliminate the possibility, because this check occurs before the write operation starts. +After the write operation is underway, it is still possible for replication to fail on any number of shard copies but still succeed on the primary. +The `_shards` section of the API response reveals the number of shard copies on which replication succeeded and failed. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-create[Endpoint documentation] +[source,ts] +---- +client.create({ id, index }) +---- +[discrete] +==== Arguments + +* *Request (object):* +** *`id` (string)*: A unique identifier for the document. To automatically generate a document ID, use the `POST //_doc/` request format. +** *`index` (string)*: The name of the data stream or index to target. If the target doesn't exist and matches the name or wildcard (`*`) pattern of an index template with a `data_stream` definition, this request creates the data stream. If the target doesn't exist and doesn’t match a data stream template, this request creates the index. +** *`document` (Optional, object)*: A document. +** *`if_primary_term` (Optional, number)*: Only perform the operation if the document has this primary term. +** *`if_seq_no` (Optional, number)*: Only perform the operation if the document has this sequence number. +** *`include_source_on_error` (Optional, boolean)*: If `true`, the document source is included in the error message in case of parsing errors. +** *`op_type` (Optional, Enum("index" | "create"))*: Set to `create` to only index the document if it does not already exist (put if absent). If a document with the specified `_id` already exists, the indexing operation will fail. The behavior is the same as using the `/_create` endpoint. If a document ID is specified, this parameter defaults to `index`. Otherwise, it defaults to `create`. If the request targets a data stream, an `op_type` of `create` is required. +** *`pipeline` (Optional, string)*: The ID of the pipeline to use to preprocess incoming documents. If the index has a default ingest pipeline specified, setting the value to `_none` turns off the default ingest pipeline for this request. If a final pipeline is configured, it will always run regardless of the value of this parameter. +** *`refresh` (Optional, Enum(true | false | "wait_for"))*: If `true`, Elasticsearch refreshes the affected shards to make this operation visible to search. If `wait_for`, it waits for a refresh to make this operation visible to search. If `false`, it does nothing with refreshes. +** *`require_alias` (Optional, boolean)*: If `true`, the destination must be an index alias. +** *`require_data_stream` (Optional, boolean)*: If `true`, the request's actions must target a data stream (existing or to be created). +** *`routing` (Optional, string)*: A custom value that is used to route operations to a specific shard.
+** *`timeout` (Optional, string | -1 | 0)*: The period the request waits for the following operations: automatic index creation, dynamic mapping updates, waiting for active shards. Elasticsearch waits for at least the specified timeout period before failing. The actual wait time could be longer, particularly when multiple waits occur. This parameter is useful for situations where the primary shard assigned to perform the operation might not be available when the operation runs. Some reasons for this might be that the primary shard is currently recovering from a gateway or undergoing relocation. By default, the operation will wait on the primary shard to become available for at least 1 minute before failing and responding with an error. The actual wait time could be longer, particularly when multiple waits occur. +** *`version` (Optional, number)*: The explicit version number for concurrency control. It must be a non-negative long number. +** *`version_type` (Optional, Enum("internal" | "external" | "external_gte" | "force"))*: The version type. +** *`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))*: The number of shard copies that must be active before proceeding with the operation. You can set it to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). The default value of `1` means it waits for each primary shard to be active. + +[discrete] +=== delete +Delete a document. + +Remove a JSON document from the specified index. + +NOTE: You cannot send deletion requests directly to a data stream. +To delete a document in a data stream, you must target the backing index containing the document. + +**Optimistic concurrency control** + +Delete operations can be made conditional and only be performed if the last modification to the document was assigned the sequence number and primary term specified by the `if_seq_no` and `if_primary_term` parameters. +If a mismatch is detected, the operation will result in a `VersionConflictException` and a status code of `409`. + +**Versioning** + +Each document indexed is versioned. +When deleting a document, the version can be specified to make sure the relevant document you are trying to delete is actually being deleted and it has not changed in the meantime. +Every write operation run on a document, deletes included, causes its version to be incremented. +The version number of a deleted document remains available for a short time after deletion to allow for control of concurrent operations. +The length of time for which a deleted document's version remains available is determined by the `index.gc_deletes` index setting. + +**Routing** + +If routing is used during indexing, the routing value also needs to be specified to delete a document. + +If the `_routing` mapping is set to `required` and no routing value is specified, the delete API throws a `RoutingMissingException` and rejects the request. + +For example: + +---- +DELETE /my-index-000001/_doc/1?routing=shard-1 +---- + +This request deletes the document with ID 1, but it is routed based on the user. +The document is not deleted if the correct routing is not specified. + +**Distributed** + +The delete operation gets hashed into a specific shard ID. +It then gets redirected into the primary shard within that ID group and replicated (if needed) to shard replicas within that ID group. 
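+
+Before looking at the request parameters, the optimistic concurrency control described above can be sketched with the client as follows (the index name, document ID, and sequence values are hypothetical and would normally come from a previous read of the document):
+
+[source,ts]
+----
+// Minimal sketch: delete only if the document has not changed since it was last read.
+// The if_seq_no and if_primary_term values are taken from an earlier get or index response.
+await client.delete({
+  index: 'my-index-000001',
+  id: '1',
+  if_seq_no: 362,
+  if_primary_term: 2
+})
+----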
+ +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-delete[Endpoint documentation] +[source,ts] +---- +client.delete({ id, index }) +---- +[discrete] +==== Arguments + +* *Request (object):* +** *`id` (string)*: A unique identifier for the document. +** *`index` (string)*: The name of the target index. +** *`if_primary_term` (Optional, number)*: Only perform the operation if the document has this primary term. +** *`if_seq_no` (Optional, number)*: Only perform the operation if the document has this sequence number. +** *`refresh` (Optional, Enum(true | false | "wait_for"))*: If `true`, Elasticsearch refreshes the affected shards to make this operation visible to search. If `wait_for`, it waits for a refresh to make this operation visible to search. If `false`, it does nothing with refreshes. +** *`routing` (Optional, string)*: A custom value used to route operations to a specific shard. +** *`timeout` (Optional, string | -1 | 0)*: The period to wait for active shards. This parameter is useful for situations where the primary shard assigned to perform the delete operation might not be available when the delete operation runs. Some reasons for this might be that the primary shard is currently recovering from a store or undergoing relocation. By default, the delete operation will wait on the primary shard to become available for up to 1 minute before failing and responding with an error. +** *`version` (Optional, number)*: An explicit version number for concurrency control. It must match the current version of the document for the request to succeed. +** *`version_type` (Optional, Enum("internal" | "external" | "external_gte" | "force"))*: The version type. +** *`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))*: The minimum number of shard copies that must be active before proceeding with the operation. You can set it to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). The default value of `1` means it waits for each primary shard to be active. + +[discrete] +=== delete_by_query +Delete documents. + +Deletes documents that match the specified query. + +If the Elasticsearch security features are enabled, you must have the following index privileges for the target data stream, index, or alias: + +* `read` +* `delete` or `write` + +You can specify the query criteria in the request URI or the request body using the same syntax as the search API. +When you submit a delete by query request, Elasticsearch gets a snapshot of the data stream or index when it begins processing the request and deletes matching documents using internal versioning. +If a document changes between the time that the snapshot is taken and the delete operation is processed, it results in a version conflict and the delete operation fails. + +NOTE: Documents with a version equal to 0 cannot be deleted using delete by query because internal versioning does not support 0 as a valid version number. + +While processing a delete by query request, Elasticsearch performs multiple search requests sequentially to find all of the matching documents to delete. +A bulk delete request is performed for each batch of matching documents. +If a search or bulk request is rejected, the requests are retried up to 10 times, with exponential back off. +If the maximum retry limit is reached, processing halts and all failed requests are returned in the response. +Any delete requests that completed successfully still stick, they are not rolled back. 
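+
+As a rough sketch of what such a request looks like with the client (the index name, field, and value are hypothetical), the call below deletes every document matching a single term query:
+
+[source,ts]
+----
+// Minimal sketch: delete all documents whose user.id field matches a given value.
+const response = await client.deleteByQuery({
+  index: 'my-index-000001',
+  query: { term: { 'user.id': 'kimchy' } }
+})
+console.log(response)
+----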
+ +You can opt to count version conflicts instead of halting and returning by setting `conflicts` to `proceed`. +Note that if you opt to count version conflicts the operation could attempt to delete more documents from the source than `max_docs` until it has successfully deleted `max_docs documents`, or it has gone through every document in the source query. + +**Throttling delete requests** + +To control the rate at which delete by query issues batches of delete operations, you can set `requests_per_second` to any positive decimal number. +This pads each batch with a wait time to throttle the rate. +Set `requests_per_second` to `-1` to disable throttling. + +Throttling uses a wait time between batches so that the internal scroll requests can be given a timeout that takes the request padding into account. +The padding time is the difference between the batch size divided by the `requests_per_second` and the time spent writing. +By default the batch size is `1000`, so if `requests_per_second` is set to `500`: + +---- +target_time = 1000 / 500 per second = 2 seconds +wait_time = target_time - write_time = 2 seconds - .5 seconds = 1.5 seconds +---- + +Since the batch is issued as a single `_bulk` request, large batch sizes cause Elasticsearch to create many requests and wait before starting the next set. +This is "bursty" instead of "smooth". + +**Slicing** + +Delete by query supports sliced scroll to parallelize the delete process. +This can improve efficiency and provide a convenient way to break the request down into smaller parts. + +Setting `slices` to `auto` lets Elasticsearch choose the number of slices to use. +This setting will use one slice per shard, up to a certain limit. +If there are multiple source data streams or indices, it will choose the number of slices based on the index or backing index with the smallest number of shards. +Adding slices to the delete by query operation creates sub-requests which means it has some quirks: + +* You can see these requests in the tasks APIs. These sub-requests are "child" tasks of the task for the request with slices. +* Fetching the status of the task for the request with slices only contains the status of completed slices. +* These sub-requests are individually addressable for things like cancellation and rethrottling. +* Rethrottling the request with `slices` will rethrottle the unfinished sub-request proportionally. +* Canceling the request with `slices` will cancel each sub-request. +* Due to the nature of `slices` each sub-request won't get a perfectly even portion of the documents. All documents will be addressed, but some slices may be larger than others. Expect larger slices to have a more even distribution. +* Parameters like `requests_per_second` and `max_docs` on a request with `slices` are distributed proportionally to each sub-request. Combine that with the earlier point about distribution being uneven and you should conclude that using `max_docs` with `slices` might not result in exactly `max_docs` documents being deleted. +* Each sub-request gets a slightly different snapshot of the source data stream or index though these are all taken at approximately the same time. + +If you're slicing manually or otherwise tuning automatic slicing, keep in mind that: + +* Query performance is most efficient when the number of slices is equal to the number of shards in the index or backing index. If that number is large (for example, 500), choose a lower number as too many `slices` hurts performance. 
Setting `slices` higher than the number of shards generally does not improve efficiency and adds overhead. +* Delete performance scales linearly across available resources with the number of slices. + +Whether query or delete performance dominates the runtime depends on the documents being reindexed and cluster resources. + +**Cancel a delete by query operation** + +Any delete by query can be canceled using the task cancel API. For example: + +---- +POST _tasks/r1A2WoRbTwKZ516z6NEs5A:36619/_cancel +---- + +The task ID can be found by using the get tasks API. + +Cancellation should happen quickly but might take a few seconds. +The get task status API will continue to list the delete by query task until this task checks that it has been cancelled and terminates itself. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-delete-by-query[Endpoint documentation] +[source,ts] +---- +client.deleteByQuery({ index }) +---- +[discrete] +==== Arguments + +* *Request (object):* +** *`index` (string | string[])*: A list of data streams, indices, and aliases to search. It supports wildcards (`*`). To search all data streams or indices, omit this parameter or use `*` or `_all`. +** *`max_docs` (Optional, number)*: The maximum number of documents to delete. +** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: The documents to delete specified with Query DSL. +** *`slice` (Optional, { field, id, max })*: Slice the request manually using the provided slice ID and total number of slices. +** *`allow_no_indices` (Optional, boolean)*: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. +** *`analyzer` (Optional, string)*: Analyzer to use for the query string. This parameter can be used only when the `q` query string parameter is specified. +** *`analyze_wildcard` (Optional, boolean)*: If `true`, wildcard and prefix queries are analyzed. This parameter can be used only when the `q` query string parameter is specified. +** *`conflicts` (Optional, Enum("abort" | "proceed"))*: What to do if delete by query hits version conflicts: `abort` or `proceed`. +** *`default_operator` (Optional, Enum("and" | "or"))*: The default operator for query string query: `AND` or `OR`. This parameter can be used only when the `q` query string parameter is specified. +** *`df` (Optional, string)*: The field to use as default where no field prefix is given in the query string. This parameter can be used only when the `q` query string parameter is specified. 
+** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: The type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. It supports a list of values, such as `open,hidden`. +** *`from` (Optional, number)*: Starting offset (default: 0) +** *`ignore_unavailable` (Optional, boolean)*: If `false`, the request returns an error if it targets a missing or closed index. +** *`lenient` (Optional, boolean)*: If `true`, format-based query failures (such as providing text to a numeric field) in the query string will be ignored. This parameter can be used only when the `q` query string parameter is specified. +** *`preference` (Optional, string)*: The node or shard the operation should be performed on. It is random by default. +** *`refresh` (Optional, boolean)*: If `true`, Elasticsearch refreshes all shards involved in the delete by query after the request completes. This is different than the delete API's `refresh` parameter, which causes just the shard that received the delete request to be refreshed. Unlike the delete API, it does not support `wait_for`. +** *`request_cache` (Optional, boolean)*: If `true`, the request cache is used for this request. Defaults to the index-level setting. +** *`requests_per_second` (Optional, float)*: The throttle for this request in sub-requests per second. +** *`routing` (Optional, string)*: A custom value used to route operations to a specific shard. +** *`q` (Optional, string)*: A query in the Lucene query string syntax. +** *`scroll` (Optional, string | -1 | 0)*: The period to retain the search context for scrolling. +** *`scroll_size` (Optional, number)*: The size of the scroll request that powers the operation. +** *`search_timeout` (Optional, string | -1 | 0)*: The explicit timeout for each search request. It defaults to no timeout. +** *`search_type` (Optional, Enum("query_then_fetch" | "dfs_query_then_fetch"))*: The type of the search operation. Available options include `query_then_fetch` and `dfs_query_then_fetch`. +** *`slices` (Optional, number | Enum("auto"))*: The number of slices this task should be divided into. +** *`sort` (Optional, string[])*: A list of `:` pairs. +** *`stats` (Optional, string[])*: The specific `tag` of the request for logging and statistical purposes. +** *`terminate_after` (Optional, number)*: The maximum number of documents to collect for each shard. If a query reaches this limit, Elasticsearch terminates the query early. Elasticsearch collects documents before sorting. Use with caution. Elasticsearch applies this parameter to each shard handling the request. When possible, let Elasticsearch perform early termination automatically. Avoid specifying this parameter for requests that target data streams with backing indices across multiple data tiers. +** *`timeout` (Optional, string | -1 | 0)*: The period each deletion request waits for active shards. +** *`version` (Optional, boolean)*: If `true`, returns the document version as part of a hit. +** *`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))*: The number of shard copies that must be active before proceeding with the operation. Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). The `timeout` value controls how long each write request waits for unavailable shards to become available. 
+** *`wait_for_completion` (Optional, boolean)*: If `true`, the request blocks until the operation is complete. If `false`, Elasticsearch performs some preflight checks, launches the request, and returns a task you can use to cancel or get the status of the task. Elasticsearch creates a record of this task as a document at `.tasks/task/${taskId}`. When you are done with a task, you should delete the task document so Elasticsearch can reclaim the space. + +[discrete] +=== delete_by_query_rethrottle +Throttle a delete by query operation. + +Change the number of requests per second for a particular delete by query operation. +Rethrottling that speeds up the query takes effect immediately but rethrottling that slows down the query takes effect after completing the current batch to prevent scroll timeouts. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-delete-by-query-rethrottle[Endpoint documentation] +[source,ts] +---- +client.deleteByQueryRethrottle({ task_id }) +---- +[discrete] +==== Arguments + +* *Request (object):* +** *`task_id` (string | number)*: The ID for the task. +** *`requests_per_second` (Optional, float)*: The throttle for this request in sub-requests per second. To disable throttling, set it to `-1`. + +[discrete] +=== delete_script +Delete a script or search template. +Deletes a stored script or search template. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-delete-script[Endpoint documentation] +[source,ts] +---- +client.deleteScript({ id }) +---- +[discrete] +==== Arguments + +* *Request (object):* +** *`id` (string)*: The identifier for the stored script or search template. +** *`master_timeout` (Optional, string | -1 | 0)*: The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. It can also be set to `-1` to indicate that the request should never time out. +** *`timeout` (Optional, string | -1 | 0)*: The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. It can also be set to `-1` to indicate that the request should never time out. + +[discrete] +=== exists +Check a document. + +Verify that a document exists. +For example, check to see if a document with the `_id` 0 exists: + +---- +HEAD my-index-000001/_doc/0 +---- + +If the document exists, the API returns a status code of `200 - OK`. +If the document doesn’t exist, the API returns `404 - Not Found`. + +**Versioning support** + +You can use the `version` parameter to check the document only if its current version is equal to the specified one. + +Internally, Elasticsearch has marked the old document as deleted and added an entirely new document. +The old version of the document doesn't disappear immediately, although you won't be able to access it. +Elasticsearch cleans up deleted documents in the background as you continue to index more data. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-get[Endpoint documentation] +[source,ts] +---- +client.exists({ id, index }) +---- +[discrete] +==== Arguments + +* *Request (object):* +** *`id` (string)*: A unique document identifier. +** *`index` (string)*: A list of data streams, indices, and aliases. It supports wildcards (`*`). +** *`preference` (Optional, string)*: The node or shard the operation should be performed on. By default, the operation is randomized between the shard replicas.
If it is set to `_local`, the operation will prefer to be run on a local allocated shard when possible. If it is set to a custom value, the value is used to guarantee that the same shards will be used for the same custom value. This can help with "jumping values" when hitting different shards in different refresh states. A sample value can be something like the web session ID or the user name. +** *`realtime` (Optional, boolean)*: If `true`, the request is real-time as opposed to near-real-time. +** *`refresh` (Optional, boolean)*: If `true`, the request refreshes the relevant shards before retrieving the document. Setting it to `true` should be done after careful thought and verification that this does not cause a heavy load on the system (and slow down indexing). +** *`routing` (Optional, string)*: A custom value used to route operations to a specific shard. +** *`_source` (Optional, boolean | string | string[])*: Indicates whether to return the `_source` field (`true` or `false`) or lists the fields to return. +** *`_source_excludes` (Optional, string | string[])*: A list of source fields to exclude from the response. You can also use this parameter to exclude fields from the subset specified in `_source_includes` query parameter. If the `_source` parameter is `false`, this parameter is ignored. +** *`_source_includes` (Optional, string | string[])*: A list of source fields to include in the response. If this parameter is specified, only these source fields are returned. You can exclude fields from this subset using the `_source_excludes` query parameter. If the `_source` parameter is `false`, this parameter is ignored. +** *`stored_fields` (Optional, string | string[])*: A list of stored fields to return as part of a hit. If no fields are specified, no stored fields are included in the response. If this field is specified, the `_source` parameter defaults to `false`. +** *`version` (Optional, number)*: Explicit version number for concurrency control. The specified version must match the current version of the document for the request to succeed. +** *`version_type` (Optional, Enum("internal" | "external" | "external_gte" | "force"))*: The version type. + +[discrete] +=== exists_source +Check for a document source. + +Check whether a document source exists in an index. +For example: + +---- +HEAD my-index-000001/_source/1 +---- + +A document's source is not available if it is disabled in the mapping. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-get[Endpoint documentation] +[source,ts] +---- +client.existsSource({ id, index }) +---- +[discrete] +==== Arguments + +* *Request (object):* +** *`id` (string)*: A unique identifier for the document. +** *`index` (string)*: A list of data streams, indices, and aliases. It supports wildcards (`*`). +** *`preference` (Optional, string)*: The node or shard the operation should be performed on. By default, the operation is randomized between the shard replicas. +** *`realtime` (Optional, boolean)*: If `true`, the request is real-time as opposed to near-real-time. +** *`refresh` (Optional, boolean)*: If `true`, the request refreshes the relevant shards before retrieving the document. Setting it to `true` should be done after careful thought and verification that this does not cause a heavy load on the system (and slow down indexing). +** *`routing` (Optional, string)*: A custom value used to route operations to a specific shard. 
+** *`_source` (Optional, boolean | string | string[])*: Indicates whether to return the `_source` field (`true` or `false`) or lists the fields to return. +** *`_source_excludes` (Optional, string | string[])*: A list of source fields to exclude in the response. +** *`_source_includes` (Optional, string | string[])*: A list of source fields to include in the response. +** *`version` (Optional, number)*: The version number for concurrency control. It must match the current version of the document for the request to succeed. +** *`version_type` (Optional, Enum("internal" | "external" | "external_gte" | "force"))*: The version type. + +[discrete] +=== explain +Explain a document match result. +Get information about why a specific document matches, or doesn't match, a query. +It computes a score explanation for a query and a specific document. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-explain[Endpoint documentation] +[source,ts] +---- +client.explain({ id, index }) +---- +[discrete] +==== Arguments + +* *Request (object):* +** *`id` (string)*: The document identifier. +** *`index` (string)*: Index names that are used to limit the request. Only a single index name can be provided to this parameter. +** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Defines the search definition using the Query DSL. +** *`analyzer` (Optional, string)*: The analyzer to use for the query string. This parameter can be used only when the `q` query string parameter is specified. +** *`analyze_wildcard` (Optional, boolean)*: If `true`, wildcard and prefix queries are analyzed. This parameter can be used only when the `q` query string parameter is specified. +** *`default_operator` (Optional, Enum("and" | "or"))*: The default operator for query string query: `AND` or `OR`. This parameter can be used only when the `q` query string parameter is specified. +** *`df` (Optional, string)*: The field to use as default where no field prefix is given in the query string. This parameter can be used only when the `q` query string parameter is specified. +** *`lenient` (Optional, boolean)*: If `true`, format-based query failures (such as providing text to a numeric field) in the query string will be ignored. This parameter can be used only when the `q` query string parameter is specified. +** *`preference` (Optional, string)*: The node or shard the operation should be performed on. It is random by default. +** *`routing` (Optional, string)*: A custom value used to route operations to a specific shard. +** *`_source` (Optional, boolean | string | string[])*: `True` or `false` to return the `_source` field or not or a list of fields to return. +** *`_source_excludes` (Optional, string | string[])*: A list of source fields to exclude from the response. 
You can also use this parameter to exclude fields from the subset specified in `_source_includes` query parameter. If the `_source` parameter is `false`, this parameter is ignored. +** *`_source_includes` (Optional, string | string[])*: A list of source fields to include in the response. If this parameter is specified, only these source fields are returned. You can exclude fields from this subset using the `_source_excludes` query parameter. If the `_source` parameter is `false`, this parameter is ignored. +** *`stored_fields` (Optional, string | string[])*: A list of stored fields to return in the response. +** *`q` (Optional, string)*: The query in the Lucene query string syntax. + +[discrete] +=== field_caps +Get the field capabilities. + +Get information about the capabilities of fields among multiple indices. + +For data streams, the API returns field capabilities among the stream’s backing indices. +It returns runtime fields like any other field. +For example, a runtime field with a type of keyword is returned the same as any other field that belongs to the `keyword` family. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-field-caps[Endpoint documentation] +[source,ts] +---- +client.fieldCaps({ ... }) +---- +[discrete] +==== Arguments + +* *Request (object):* +** *`index` (Optional, string | string[])*: A list of data streams, indices, and aliases used to limit the request. Supports wildcards (*). To target all data streams and indices, omit this parameter or use * or _all. +** *`fields` (Optional, string | string[])*: A list of fields to retrieve capabilities for. Wildcard (`*`) expressions are supported. +** *`index_filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Filter indices if the provided query rewrites to `match_none` on every shard. IMPORTANT: The filtering is done on a best-effort basis, it uses index statistics and mappings to rewrite queries to `match_none` instead of fully running the request. For instance a range query over a date field can rewrite to `match_none` if all documents within a shard (including deleted documents) are outside of the provided range. However, not all queries can rewrite to `match_none` so this API may return an index even if the provided filter matches no document. +** *`runtime_mappings` (Optional, Record)*: Define ad-hoc runtime fields in the request similar to the way it is done in search requests. These fields exist only as part of the query and take precedence over fields defined with the same name in the index mappings. +** *`allow_no_indices` (Optional, boolean)*: If false, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. 
For example, a request targeting `foo*,bar*` returns an error if an index starts with foo but no index starts with bar. +** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: The type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports a list of values, such as `open,hidden`. +** *`ignore_unavailable` (Optional, boolean)*: If `true`, missing or closed indices are not included in the response. +** *`include_unmapped` (Optional, boolean)*: If true, unmapped fields are included in the response. +** *`filters` (Optional, string)*: A list of filters to apply to the response. +** *`types` (Optional, string[])*: A list of field types to include. Any fields that do not match one of these types will be excluded from the results. It defaults to empty, meaning that all field types are returned. +** *`include_empty_fields` (Optional, boolean)*: If false, empty fields are not included in the response. + +[discrete] +=== get +Get a document by its ID. + +Get a document and its source or stored fields from an index. + +By default, this API is realtime and is not affected by the refresh rate of the index (when data will become visible for search). +In the case where stored fields are requested with the `stored_fields` parameter and the document has been updated but is not yet refreshed, the API will have to parse and analyze the source to extract the stored fields. +To turn off realtime behavior, set the `realtime` parameter to false. + +**Source filtering** + +By default, the API returns the contents of the `_source` field unless you have used the `stored_fields` parameter or the `_source` field is turned off. +You can turn off `_source` retrieval by using the `_source` parameter: + +---- +GET my-index-000001/_doc/0?_source=false +---- + +If you only need one or two fields from the `_source`, use the `_source_includes` or `_source_excludes` parameters to include or filter out particular fields. +This can be helpful with large documents where partial retrieval can save on network overhead +Both parameters take a comma separated list of fields or wildcard expressions. +For example: + +---- +GET my-index-000001/_doc/0?_source_includes=*.id&_source_excludes=entities +---- + +If you only want to specify includes, you can use a shorter notation: + +---- +GET my-index-000001/_doc/0?_source=*.id +---- + +**Routing** + +If routing is used during indexing, the routing value also needs to be specified to retrieve a document. +For example: + +---- +GET my-index-000001/_doc/2?routing=user1 +---- + +This request gets the document with ID 2, but it is routed based on the user. +The document is not fetched if the correct routing is not specified. + +**Distributed** + +The GET operation is hashed into a specific shard ID. +It is then redirected to one of the replicas within that shard ID and returns the result. +The replicas are the primary shard and its replicas within that shard ID group. +This means that the more replicas you have, the better your GET scaling will be. + +**Versioning support** + +You can use the `version` parameter to retrieve the document only if its current version is equal to the specified one. + +Internally, Elasticsearch has marked the old document as deleted and added an entirely new document. 
+The old version of the document doesn't disappear immediately, although you won't be able to access it. +Elasticsearch cleans up deleted documents in the background as you continue to index more data. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-get[Endpoint documentation] +[source,ts] +---- +client.get({ id, index }) +---- +[discrete] +==== Arguments + +* *Request (object):* +** *`id` (string)*: A unique document identifier. +** *`index` (string)*: The name of the index that contains the document. +** *`force_synthetic_source` (Optional, boolean)*: Indicates whether the request forces synthetic `_source`. Use this parameter to test if the mapping supports synthetic `_source` and to get a sense of the worst case performance. Fetches with this parameter enabled will be slower than enabling synthetic source natively in the index. +** *`preference` (Optional, string)*: The node or shard the operation should be performed on. By default, the operation is randomized between the shard replicas. If it is set to `_local`, the operation will prefer to be run on a local allocated shard when possible. If it is set to a custom value, the value is used to guarantee that the same shards will be used for the same custom value. This can help with "jumping values" when hitting different shards in different refresh states. A sample value can be something like the web session ID or the user name. +** *`realtime` (Optional, boolean)*: If `true`, the request is real-time as opposed to near-real-time. +** *`refresh` (Optional, boolean)*: If `true`, the request refreshes the relevant shards before retrieving the document. Setting it to `true` should be done after careful thought and verification that this does not cause a heavy load on the system (and slow down indexing). +** *`routing` (Optional, string)*: A custom value used to route operations to a specific shard. +** *`_source` (Optional, boolean | string | string[])*: Indicates whether to return the `_source` field (`true` or `false`) or lists the fields to return. +** *`_source_excludes` (Optional, string | string[])*: A list of source fields to exclude from the response. You can also use this parameter to exclude fields from the subset specified in `_source_includes` query parameter. If the `_source` parameter is `false`, this parameter is ignored. +** *`_source_includes` (Optional, string | string[])*: A list of source fields to include in the response. If this parameter is specified, only these source fields are returned. You can exclude fields from this subset using the `_source_excludes` query parameter. If the `_source` parameter is `false`, this parameter is ignored. +** *`stored_fields` (Optional, string | string[])*: A list of stored fields to return as part of a hit. If no fields are specified, no stored fields are included in the response. If this field is specified, the `_source` parameter defaults to `false`. Only leaf fields can be retrieved with the `stored_field` option. Object fields can't be returned; if specified, the request fails. +** *`version` (Optional, number)*: The version number for concurrency control. It must match the current version of the document for the request to succeed. +** *`version_type` (Optional, Enum("internal" | "external" | "external_gte" | "force"))*: The version type. + +[discrete] +=== get_script +Get a script or search template. +Retrieves a stored script or search template.
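+
+For example, a minimal sketch of fetching a stored script or search template by its identifier (the `my-search-template` ID is hypothetical):
+
+[source,ts]
+----
+// Minimal sketch: retrieve a previously stored script or search template by ID.
+const response = await client.getScript({ id: 'my-search-template' })
+console.log(response)
+----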
+ +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-get-script[Endpoint documentation] +[source,ts] +---- +client.getScript({ id }) +---- +[discrete] +==== Arguments + +* *Request (object):* +** *`id` (string)*: The identifier for the stored script or search template. +** *`master_timeout` (Optional, string | -1 | 0)*: The period to wait for the master node. If the master node is not available before the timeout expires, the request fails and returns an error. It can also be set to `-1` to indicate that the request should never timeout. + +[discrete] +=== get_script_context +Get script contexts. + +Get a list of supported script contexts and their methods. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-get-script-context[Endpoint documentation] +[source,ts] +---- +client.getScriptContext() +---- + +[discrete] +=== get_script_languages +Get script languages. + +Get a list of available script types, languages, and contexts. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-get-script-languages[Endpoint documentation] +[source,ts] +---- +client.getScriptLanguages() +---- + +[discrete] +=== get_source +Get a document's source. + +Get the source of a document. +For example: + +---- +GET my-index-000001/_source/1 +---- + +You can use the source filtering parameters to control which parts of the `_source` are returned: + +---- +GET my-index-000001/_source/1/?_source_includes=*.id&_source_excludes=entities +---- + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-get[Endpoint documentation] +[source,ts] +---- +client.getSource({ id, index }) +---- +[discrete] +==== Arguments + +* *Request (object):* +** *`id` (string)*: A unique document identifier. +** *`index` (string)*: The name of the index that contains the document. +** *`preference` (Optional, string)*: The node or shard the operation should be performed on. By default, the operation is randomized between the shard replicas. +** *`realtime` (Optional, boolean)*: If `true`, the request is real-time as opposed to near-real-time. +** *`refresh` (Optional, boolean)*: If `true`, the request refreshes the relevant shards before retrieving the document. Setting it to `true` should be done after careful thought and verification that this does not cause a heavy load on the system (and slow down indexing). +** *`routing` (Optional, string)*: A custom value used to route operations to a specific shard. +** *`_source` (Optional, boolean | string | string[])*: Indicates whether to return the `_source` field (`true` or `false`) or lists the fields to return. +** *`_source_excludes` (Optional, string | string[])*: A list of source fields to exclude in the response. +** *`_source_includes` (Optional, string | string[])*: A list of source fields to include in the response. +** *`stored_fields` (Optional, string | string[])*: A list of stored fields to return as part of a hit. +** *`version` (Optional, number)*: The version number for concurrency control. It must match the current version of the document for the request to succeed. +** *`version_type` (Optional, Enum("internal" | "external" | "external_gte" | "force"))*: The version type. + +[discrete] +=== health_report +Get the cluster health. +Get a report with the health status of an Elasticsearch cluster. +The report contains a list of indicators that compose Elasticsearch functionality. + +Each indicator has a health status of: green, unknown, yellow or red. 
+The indicator will provide an explanation and metadata describing the reason for its current health status. + +The cluster’s status is controlled by the worst indicator status. + +In the event that an indicator’s status is non-green, a list of impacts may be present in the indicator result which detail the functionalities that are negatively affected by the health issue. +Each impact carries with it a severity level, an area of the system that is affected, and a simple description of the impact on the system. + +Some health indicators can determine the root cause of a health problem and prescribe a set of steps that can be performed in order to improve the health of the system. +The root cause and remediation steps are encapsulated in a diagnosis. +A diagnosis contains a cause detailing a root cause analysis, an action containing a brief description of the steps to take to fix the problem, the list of affected resources (if applicable), and a detailed step-by-step troubleshooting guide to fix the diagnosed problem. + +NOTE: The health indicators perform root cause analysis of non-green health statuses. This can be computationally expensive when called frequently. +When setting up automated polling of the API for health status, set verbose to false to disable the more expensive analysis logic. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-health-report[Endpoint documentation] +[source,ts] +---- +client.healthReport({ ... }) +---- +[discrete] +==== Arguments + +* *Request (object):* +** *`feature` (Optional, string | string[])*: A feature of the cluster, as returned by the top-level health report API. +** *`timeout` (Optional, string | -1 | 0)*: Explicit operation timeout. +** *`verbose` (Optional, boolean)*: Opt-in for more information about the health of the system. +** *`size` (Optional, number)*: Limit the number of affected resources the health report API returns. + +[discrete] +=== index +Create or update a document in an index. + +Add a JSON document to the specified data stream or index and make it searchable. +If the target is an index and the document already exists, the request updates the document and increments its version. + +NOTE: You cannot use this API to send update requests for existing documents in a data stream. + +If the Elasticsearch security features are enabled, you must have the following index privileges for the target data stream, index, or index alias: + +* To add or overwrite a document using the `PUT //_doc/<_id>` request format, you must have the `create`, `index`, or `write` index privilege. +* To add a document using the `POST //_doc/` request format, you must have the `create_doc`, `create`, `index`, or `write` index privilege. +* To automatically create a data stream or index with this API request, you must have the `auto_configure`, `create_index`, or `manage` index privilege. + +Automatic data stream creation requires a matching index template with data stream enabled. + +NOTE: Replica shards might not all be started when an indexing operation returns successfully. +By default, only the primary is required. Set `wait_for_active_shards` to change this default behavior. + +**Automatically create data streams and indices** + +If the request's target doesn't exist and matches an index template with a `data_stream` definition, the index operation automatically creates the data stream. 
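+
+As a hedged sketch of this behavior (the data stream name is hypothetical and assumes a matching index template with a `data_stream` definition already exists), indexing into a missing target creates the stream on the fly:
+
+[source,ts]
+----
+// Minimal sketch: indexing into a non-existent target that matches a data stream
+// template creates the data stream automatically. Data streams require op_type 'create'
+// and a @timestamp field in the document.
+await client.index({
+  index: 'logs-myapp-default',
+  op_type: 'create',
+  document: { '@timestamp': new Date().toISOString(), message: 'automatic data stream creation' }
+})
+----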
+ +If the target doesn't exist and doesn't match a data stream template, the operation automatically creates the index and applies any matching index templates. + +NOTE: Elasticsearch includes several built-in index templates. To avoid naming collisions with these templates, refer to index pattern documentation. + +If no mapping exists, the index operation creates a dynamic mapping. +By default, new fields and objects are automatically added to the mapping if needed. + +Automatic index creation is controlled by the `action.auto_create_index` setting. +If it is `true`, any index can be created automatically. +You can modify this setting to explicitly allow or block automatic creation of indices that match specified patterns or set it to `false` to turn off automatic index creation entirely. +Specify a list of patterns you want to allow or prefix each pattern with `+` or `-` to indicate whether it should be allowed or blocked. +When a list is specified, the default behaviour is to disallow. + +NOTE: The `action.auto_create_index` setting affects the automatic creation of indices only. +It does not affect the creation of data streams. + +**Optimistic concurrency control** + +Index operations can be made conditional and only be performed if the last modification to the document was assigned the sequence number and primary term specified by the `if_seq_no` and `if_primary_term` parameters. +If a mismatch is detected, the operation will result in a `VersionConflictException` and a status code of `409`. + +**Routing** + +By default, shard placement — or routing — is controlled by using a hash of the document's ID value. +For more explicit control, the value fed into the hash function used by the router can be directly specified on a per-operation basis using the `routing` parameter. + +When setting up explicit mapping, you can also use the `_routing` field to direct the index operation to extract the routing value from the document itself. +This does come at the (very minimal) cost of an additional document parsing pass. +If the `_routing` mapping is defined and set to be required, the index operation will fail if no routing value is provided or extracted. + +NOTE: Data streams do not support custom routing unless they were created with the `allow_custom_routing` setting enabled in the template. + +**Distributed** + +The index operation is directed to the primary shard based on its route and performed on the actual node containing this shard. +After the primary shard completes the operation, if needed, the update is distributed to applicable replicas. + +**Active shards** + +To improve the resiliency of writes to the system, indexing operations can be configured to wait for a certain number of active shard copies before proceeding with the operation. +If the requisite number of active shard copies are not available, then the write operation must wait and retry, until either the requisite shard copies have started or a timeout occurs. +By default, write operations only wait for the primary shards to be active before proceeding (that is to say `wait_for_active_shards` is `1`). +This default can be overridden in the index settings dynamically by setting `index.write.wait_for_active_shards`. +To alter this behavior per operation, use the `wait_for_active_shards request` parameter. + +Valid values are all or any positive integer up to the total number of configured copies per shard in the index (which is `number_of_replicas`+1). 
+Specifying a negative value or a number greater than the number of shard copies will throw an error. + +For example, suppose you have a cluster of three nodes, A, B, and C and you create an index index with the number of replicas set to 3 (resulting in 4 shard copies, one more copy than there are nodes). +If you attempt an indexing operation, by default the operation will only ensure the primary copy of each shard is available before proceeding. +This means that even if B and C went down and A hosted the primary shard copies, the indexing operation would still proceed with only one copy of the data. +If `wait_for_active_shards` is set on the request to `3` (and all three nodes are up), the indexing operation will require 3 active shard copies before proceeding. +This requirement should be met because there are 3 active nodes in the cluster, each one holding a copy of the shard. +However, if you set `wait_for_active_shards` to `all` (or to `4`, which is the same in this situation), the indexing operation will not proceed as you do not have all 4 copies of each shard active in the index. +The operation will timeout unless a new node is brought up in the cluster to host the fourth copy of the shard. + +It is important to note that this setting greatly reduces the chances of the write operation not writing to the requisite number of shard copies, but it does not completely eliminate the possibility, because this check occurs before the write operation starts. +After the write operation is underway, it is still possible for replication to fail on any number of shard copies but still succeed on the primary. +The `_shards` section of the API response reveals the number of shard copies on which replication succeeded and failed. + +**No operation (noop) updates** + +When updating a document by using this API, a new version of the document is always created even if the document hasn't changed. +If this isn't acceptable use the `_update` API with `detect_noop` set to `true`. +The `detect_noop` option isn't available on this API because it doesn’t fetch the old source and isn't able to compare it against the new source. + +There isn't a definitive rule for when noop updates aren't acceptable. +It's a combination of lots of factors like how frequently your data source sends updates that are actually noops and how many queries per second Elasticsearch runs on the shard receiving the updates. + +**Versioning** + +Each indexed document is given a version number. +By default, internal versioning is used that starts at 1 and increments with each update, deletes included. +Optionally, the version number can be set to an external value (for example, if maintained in a database). +To enable this functionality, `version_type` should be set to `external`. +The value provided must be a numeric, long value greater than or equal to 0, and less than around `9.2e+18`. + +NOTE: Versioning is completely real time, and is not affected by the near real time aspects of search operations. +If no version is provided, the operation runs without any version checks. + +When using the external version type, the system checks to see if the version number passed to the index request is greater than the version of the currently stored document. +If true, the document will be indexed and the new version number used. +If the value provided is less than or equal to the stored document's version number, a version conflict will occur and the index operation will fail. 
+For example:
+
+----
+PUT my-index-000001/_doc/1?version=2&version_type=external
+{
+  "user": {
+    "id": "elkbee"
+  }
+}
+----
+
+In this example, the operation will succeed since the supplied version of 2 is higher than the current document version of 1.
+If the document was already updated and its version was set to 2 or higher, the indexing command will fail and result in a conflict (409 HTTP status code).
+
+A nice side effect is that there is no need to maintain strict ordering of async indexing operations run as a result of changes to a source database, as long as version numbers from the source database are used.
+Even the simple case of updating the Elasticsearch index using data from a database is simplified if external versioning is used, as only the latest version will be used if the index operations arrive out of order.
+
+https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-create[Endpoint documentation]
+[source,ts]
+----
+client.index({ index })
+----
+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`index` (string)*: The name of the data stream or index to target. If the target doesn't exist and matches the name or wildcard (`*`) pattern of an index template with a `data_stream` definition, this request creates the data stream. If the target doesn't exist and doesn't match a data stream template, this request creates the index. You can check for existing targets with the resolve index API.
+** *`id` (Optional, string)*: A unique identifier for the document. To automatically generate a document ID, use the `POST //_doc/` request format and omit this parameter.
+** *`document` (Optional, object)*: A document.
+** *`if_primary_term` (Optional, number)*: Only perform the operation if the document has this primary term.
+** *`if_seq_no` (Optional, number)*: Only perform the operation if the document has this sequence number.
+** *`include_source_on_error` (Optional, boolean)*: If `true`, the document source is included in the error message in the case of parsing errors.
+** *`op_type` (Optional, Enum("index" | "create"))*: Set to `create` to only index the document if it does not already exist (put if absent). If a document with the specified `_id` already exists, the indexing operation will fail. The behavior is the same as using the `/_create` endpoint. If a document ID is specified, this parameter defaults to `index`. Otherwise, it defaults to `create`. If the request targets a data stream, an `op_type` of `create` is required.
+** *`pipeline` (Optional, string)*: The ID of the pipeline to use to preprocess incoming documents. If the index has a default ingest pipeline specified, then setting the value to `_none` disables the default ingest pipeline for this request. If a final pipeline is configured, it will always run regardless of the value of this parameter.
+** *`refresh` (Optional, Enum(true | false | "wait_for"))*: If `true`, Elasticsearch refreshes the affected shards to make this operation visible to search. If `wait_for`, it waits for a refresh to make this operation visible to search. If `false`, it does nothing with refreshes.
+** *`routing` (Optional, string)*: A custom value that is used to route operations to a specific shard.
+** *`timeout` (Optional, string | -1 | 0)*: The period the request waits for the following operations: automatic index creation, dynamic mapping updates, waiting for active shards.
This parameter is useful for situations where the primary shard assigned to perform the operation might not be available when the operation runs. Some reasons for this might be that the primary shard is currently recovering from a gateway or undergoing relocation. By default, the operation will wait on the primary shard to become available for at least 1 minute before failing and responding with an error. The actual wait time could be longer, particularly when multiple waits occur. +** *`version` (Optional, number)*: An explicit version number for concurrency control. It must be a non-negative long number. +** *`version_type` (Optional, Enum("internal" | "external" | "external_gte" | "force"))*: The version type. +** *`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))*: The number of shard copies that must be active before proceeding with the operation. You can set it to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). The default value of `1` means it waits for each primary shard to be active. +** *`require_alias` (Optional, boolean)*: If `true`, the destination must be an index alias. + +[discrete] +=== info +Get cluster info. +Get basic build, version, and cluster information. + +https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-info[Endpoint documentation] +[source,ts] +---- +client.info() +---- + +[discrete] +=== knn_search +Run a knn search. + +NOTE: The kNN search API has been replaced by the `knn` option in the search API. + +Perform a k-nearest neighbor (kNN) search on a dense_vector field and return the matching documents. +Given a query vector, the API finds the k closest vectors and returns those documents as search hits. + +Elasticsearch uses the HNSW algorithm to support efficient kNN search. +Like most kNN algorithms, HNSW is an approximate method that sacrifices result accuracy for improved search speed. +This means the results returned are not always the true k closest neighbors. + +The kNN search API supports restricting the search using a filter. +The search will return the top k documents that also match the filter query. + +A kNN search response has the exact same structure as a search API response. +However, certain sections have a meaning specific to kNN search: + +* The document `_score` is determined by the similarity between the query and document vector. +* The `hits.total` object contains the total number of nearest neighbor candidates considered, which is `num_candidates * num_shards`. The `hits.total.relation` will always be `eq`, indicating an exact value. + +{ref}/knn-search-api.html[Endpoint documentation] +[source,ts] +---- +client.knnSearch({ index, knn }) +---- +[discrete] +==== Arguments + +* *Request (object):* +** *`index` (string | string[])*: A list of index names to search; use `_all` or to perform the operation on all indices. +** *`knn` ({ field, query_vector, k, num_candidates })*: The kNN query to run. +** *`_source` (Optional, boolean | { excludes, includes })*: Indicates which source fields are returned for matching documents. These fields are returned in the `hits._source` property of the search response. +** *`docvalue_fields` (Optional, { field, format, include_unmapped }[])*: The request returns doc values for field names matching these patterns in the `hits.fields` property of the response. It accepts wildcard (`*`) patterns. +** *`stored_fields` (Optional, string | string[])*: A list of stored fields to return as part of a hit. 
If no fields are specified, no stored fields are included in the response. If this field is specified, the `_source` parameter defaults to `false`. You can pass `_source: true` to return both source fields and stored fields in the search response. +** *`fields` (Optional, string | string[])*: The request returns values for field names matching these patterns in the `hits.fields` property of the response. It accepts wildcard (`*`) patterns. +** *`filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type } | { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type }[])*: A query to filter the documents that can match. The kNN search will return the top `k` documents that also match this filter. The value can be a single query or a list of queries. If `filter` isn't provided, all documents are allowed to match. +** *`routing` (Optional, string)*: A list of specific routing values. + +[discrete] +=== mget +Get multiple documents. + +Get multiple JSON documents by ID from one or more indices. +If you specify an index in the request URI, you only need to specify the document IDs in the request body. +To ensure fast responses, this multi get (mget) API responds with partial results if one or more shards fail. + +**Filter source fields** + +By default, the `_source` field is returned for every document (if stored). +Use the `_source` and `_source_include` or `source_exclude` attributes to filter what fields are returned for a particular document. +You can include the `_source`, `_source_includes`, and `_source_excludes` query parameters in the request URI to specify the defaults to use when there are no per-document instructions. + +**Get stored fields** + +Use the `stored_fields` attribute to specify the set of stored fields you want to retrieve. +Any requested fields that are not stored are ignored. +You can include the `stored_fields` query parameter in the request URI to specify the defaults to use when there are no per-document instructions. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-mget[Endpoint documentation] +[source,ts] +---- +client.mget({ ... 
})
+----
+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`index` (Optional, string)*: Name of the index to retrieve documents from when `ids` are specified, or when a document in the `docs` array does not specify an index.
+** *`docs` (Optional, { _id, _index, routing, _source, stored_fields, version, version_type }[])*: The documents you want to retrieve. Required if no index is specified in the request URI.
+** *`ids` (Optional, string | string[])*: The IDs of the documents you want to retrieve. Allowed when the index is specified in the request URI.
+** *`force_synthetic_source` (Optional, boolean)*: Should this request force synthetic _source? Use this to test if the mapping supports synthetic _source and to get a sense of the worst case performance. Fetches with this enabled will be slower than enabling synthetic source natively in the index.
+** *`preference` (Optional, string)*: Specifies the node or shard the operation should be performed on. Random by default.
+** *`realtime` (Optional, boolean)*: If `true`, the request is real-time as opposed to near-real-time.
+** *`refresh` (Optional, boolean)*: If `true`, the request refreshes relevant shards before retrieving documents.
+** *`routing` (Optional, string)*: Custom value used to route operations to a specific shard.
+** *`_source` (Optional, boolean | string | string[])*: True or false to return the `_source` field or not, or a list of fields to return.
+** *`_source_excludes` (Optional, string | string[])*: A list of source fields to exclude from the response. You can also use this parameter to exclude fields from the subset specified in the `_source_includes` query parameter.
+** *`_source_includes` (Optional, string | string[])*: A list of source fields to include in the response. If this parameter is specified, only these source fields are returned. You can exclude fields from this subset using the `_source_excludes` query parameter. If the `_source` parameter is `false`, this parameter is ignored.
+** *`stored_fields` (Optional, string | string[])*: If `true`, retrieves the document fields stored in the index rather than the document `_source`.
+
+[discrete]
+=== msearch
+Run multiple searches.
+
+The format of the request is similar to the bulk API format and makes use of the newline delimited JSON (NDJSON) format.
+The structure is as follows:
+
+----
+header\n
+body\n
+header\n
+body\n
+----
+
+This structure is specifically optimized to reduce parsing if a specific search ends up redirected to another node.
+
+IMPORTANT: The final line of data must end with a newline character `\n`.
+Each newline character may be preceded by a carriage return `\r`.
+When sending requests to this endpoint, the `Content-Type` header should be set to `application/x-ndjson`.
+
+https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-msearch[Endpoint documentation]
+[source,ts]
+----
+client.msearch({ ... })
+----
+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`index` (Optional, string | string[])*: List of data streams, indices, and index aliases to search.
+** *`searches` (Optional, { allow_no_indices, expand_wildcards, ignore_unavailable, index, preference, request_cache, routing, search_type, ccs_minimize_roundtrips, allow_partial_search_results, ignore_throttled } | { aggregations, collapse, query, explain, ext, stored_fields, docvalue_fields, knn, from, highlight, indices_boost, min_score, post_filter, profile, rescore, script_fields, search_after, size, sort, _source, fields, terminate_after, stats, timeout, track_scores, track_total_hits, version, runtime_mappings, seq_no_primary_term, pit, suggest }[])* +** *`allow_no_indices` (Optional, boolean)*: If false, the request returns an error if any wildcard expression, index alias, or _all value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting foo*,bar* returns an error if an index starts with foo but no index starts with bar. +** *`ccs_minimize_roundtrips` (Optional, boolean)*: If true, network roundtrips between the coordinating node and remote clusters are minimized for cross-cluster search requests. +** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of index that wildcard expressions can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. +** *`ignore_throttled` (Optional, boolean)*: If true, concrete, expanded or aliased indices are ignored when frozen. +** *`ignore_unavailable` (Optional, boolean)*: If true, missing or closed indices are not included in the response. +** *`include_named_queries_score` (Optional, boolean)*: Indicates whether hit.matched_queries should be rendered as a map that includes the name of the matched query associated with its score (true) or as an array containing the name of the matched queries (false) This functionality reruns each named query on every hit in a search response. Typically, this adds a small overhead to a request. However, using computationally expensive named queries on a large number of hits may add significant overhead. +** *`max_concurrent_searches` (Optional, number)*: Maximum number of concurrent searches the multi search API can execute. +** *`max_concurrent_shard_requests` (Optional, number)*: Maximum number of concurrent shard requests that each sub-search request executes per node. +** *`pre_filter_shard_size` (Optional, number)*: Defines a threshold that enforces a pre-filter roundtrip to prefilter search shards based on query rewriting if the number of shards the search request expands to exceeds the threshold. This filter roundtrip can limit the number of shards significantly if for instance a shard can not match any documents based on its rewrite method i.e., if date filters are mandatory to match but the shard bounds and the query are disjoint. +** *`rest_total_hits_as_int` (Optional, boolean)*: If true, hits.total are returned as an integer in the response. Defaults to false, which returns an object. +** *`routing` (Optional, string)*: Custom routing value used to route search operations to a specific shard. +** *`search_type` (Optional, Enum("query_then_fetch" | "dfs_query_then_fetch"))*: Indicates whether global term and document frequencies should be used when scoring returned documents. +** *`typed_keys` (Optional, boolean)*: Specifies whether aggregation and suggester names should be prefixed by their respective types in the response. 
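+
+For reference, here is a minimal sketch of what a multi search might look like from the JavaScript client; the index names and queries are illustrative placeholders, and the `searches` array alternates header and body objects as described above.
+
+[source,ts]
+----
+// Two searches in one request: a match query against one hypothetical
+// index and a match_all against another.
+const response = await client.msearch({
+  searches: [
+    { index: 'my-index-000001' },
+    { query: { match: { 'user.id': 'kimchy' } } },
+    { index: 'my-index-000002' },
+    { query: { match_all: {} } }
+  ]
+})
+
+// The `responses` array contains one entry per search, in request order.
+console.log(response.responses.length)
+----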
+ +[discrete] +=== msearch_template +Run multiple templated searches. + +Run multiple templated searches with a single request. +If you are providing a text file or text input to `curl`, use the `--data-binary` flag instead of `-d` to preserve newlines. +For example: + +---- +$ cat requests +{ "index": "my-index" } +{ "id": "my-search-template", "params": { "query_string": "hello world", "from": 0, "size": 10 }} +{ "index": "my-other-index" } +{ "id": "my-other-search-template", "params": { "query_type": "match_all" }} + +$ curl -H "Content-Type: application/x-ndjson" -XGET localhost:9200/_msearch/template --data-binary "@requests"; echo +---- + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-msearch-template[Endpoint documentation] +[source,ts] +---- +client.msearchTemplate({ ... }) +---- +[discrete] +==== Arguments + +* *Request (object):* +** *`index` (Optional, string | string[])*: A list of data streams, indices, and aliases to search. It supports wildcards (`*`). To search all data streams and indices, omit this parameter or use `*`. +** *`search_templates` (Optional, { allow_no_indices, expand_wildcards, ignore_unavailable, index, preference, request_cache, routing, search_type, ccs_minimize_roundtrips, allow_partial_search_results, ignore_throttled } | { aggregations, collapse, query, explain, ext, stored_fields, docvalue_fields, knn, from, highlight, indices_boost, min_score, post_filter, profile, rescore, script_fields, search_after, size, sort, _source, fields, terminate_after, stats, timeout, track_scores, track_total_hits, version, runtime_mappings, seq_no_primary_term, pit, suggest }[])* +** *`ccs_minimize_roundtrips` (Optional, boolean)*: If `true`, network round-trips are minimized for cross-cluster search requests. +** *`max_concurrent_searches` (Optional, number)*: The maximum number of concurrent searches the API can run. +** *`search_type` (Optional, Enum("query_then_fetch" | "dfs_query_then_fetch"))*: The type of the search operation. +** *`rest_total_hits_as_int` (Optional, boolean)*: If `true`, the response returns `hits.total` as an integer. If `false`, it returns `hits.total` as an object. +** *`typed_keys` (Optional, boolean)*: If `true`, the response prefixes aggregation and suggester names with their respective types. + +[discrete] +=== mtermvectors +Get multiple term vectors. + +Get multiple term vectors with a single request. +You can specify existing documents by index and ID or provide artificial documents in the body of the request. +You can specify the index in the request body or request URI. +The response contains a `docs` array with all the fetched termvectors. +Each element has the structure provided by the termvectors API. + +**Artificial documents** + +You can also use `mtermvectors` to generate term vectors for artificial documents provided in the body of the request. +The mapping used is determined by the specified `_index`. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-mtermvectors[Endpoint documentation] +[source,ts] +---- +client.mtermvectors({ ... }) +---- +[discrete] +==== Arguments + +* *Request (object):* +** *`index` (Optional, string)*: The name of the index that contains the documents. +** *`docs` (Optional, { _id, _index, routing, _source, stored_fields, version, version_type }[])*: An array of existing or artificial documents. +** *`ids` (Optional, string[])*: A simplified syntax to specify documents by their ID if they're in the same index. 
+** *`fields` (Optional, string | string[])*: A list of fields or wildcard expressions to include in the statistics. It is used as the default list unless a specific field list is provided in the `completion_fields` or `fielddata_fields` parameters.
+** *`field_statistics` (Optional, boolean)*: If `true`, the response includes the document count, sum of document frequencies, and sum of total term frequencies.
+** *`offsets` (Optional, boolean)*: If `true`, the response includes term offsets.
+** *`payloads` (Optional, boolean)*: If `true`, the response includes term payloads.
+** *`positions` (Optional, boolean)*: If `true`, the response includes term positions.
+** *`preference` (Optional, string)*: The node or shard the operation should be performed on. It is random by default.
+** *`realtime` (Optional, boolean)*: If `true`, the request is real-time as opposed to near-real-time.
+** *`routing` (Optional, string)*: A custom value used to route operations to a specific shard.
+** *`term_statistics` (Optional, boolean)*: If `true`, the response includes term frequency and document frequency.
+** *`version` (Optional, number)*: If `true`, returns the document version as part of a hit.
+** *`version_type` (Optional, Enum("internal" | "external" | "external_gte" | "force"))*: The version type.
+
+[discrete]
+=== open_point_in_time
+Open a point in time.
+
+A search request by default runs against the most recent visible data of the target indices,
+which is called point in time. An Elasticsearch PIT (point in time) is a lightweight view into the
+state of the data as it existed when initiated. In some cases, it’s preferred to perform multiple
+search requests using the same point in time. For example, if refreshes happen between
+`search_after` requests, then the results of those requests might not be consistent as changes happening
+between searches are only visible to the more recent point in time.
+
+A point in time must be opened explicitly before being used in search requests.
+
+A subsequent search request with the `pit` parameter must not specify `index`, `routing`, or `preference` values as these parameters are copied from the point in time.
+
+Just like regular searches, you can use `from` and `size` to page through point in time search results, up to the first 10,000 hits.
+If you want to retrieve more hits, use PIT with `search_after`.
+
+IMPORTANT: The open point in time request and each subsequent search request can return different identifiers; always use the most recently received ID for the next search request.
+
+When a PIT that contains shard failures is used in a search request, the missing shards are always reported in the search response as a `NoShardAvailableActionException` exception.
+To get rid of these exceptions, a new PIT needs to be created so that shards missing from the previous PIT can be handled, assuming they become available in the meantime.
+
+**Keeping point in time alive**
+
+The `keep_alive` parameter, which is passed to an open point in time request and search request, extends the time to live of the corresponding point in time.
+The value does not need to be long enough to process all data — it just needs to be long enough for the next request.
+
+Normally, the background merge process optimizes the index by merging together smaller segments to create new, bigger segments.
+Once the smaller segments are no longer needed, they are deleted.
+However, open point-in-times prevent the old segments from being deleted since they are still in use.
+ +TIP: Keeping older segments alive means that more disk space and file handles are needed. +Ensure that you have configured your nodes to have ample free file handles. + +Additionally, if a segment contains deleted or updated documents then the point in time must keep track of whether each document in the segment was live at the time of the initial search request. +Ensure that your nodes have sufficient heap space if you have many open point-in-times on an index that is subject to ongoing deletes or updates. +Note that a point-in-time doesn't prevent its associated indices from being deleted. +You can check how many point-in-times (that is, search contexts) are open with the nodes stats API. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-open-point-in-time[Endpoint documentation] +[source,ts] +---- +client.openPointInTime({ index, keep_alive }) +---- +[discrete] +==== Arguments + +* *Request (object):* +** *`index` (string | string[])*: A list of index names to open point in time; use `_all` or empty string to perform the operation on all indices +** *`keep_alive` (string | -1 | 0)*: Extend the length of time that the point in time persists. +** *`index_filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Filter indices if the provided query rewrites to `match_none` on every shard. +** *`ignore_unavailable` (Optional, boolean)*: If `false`, the request returns an error if it targets a missing or closed index. +** *`preference` (Optional, string)*: The node or shard the operation should be performed on. By default, it is random. +** *`routing` (Optional, string)*: A custom value that is used to route operations to a specific shard. +** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: The type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. It supports a list of values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. +** *`allow_partial_search_results` (Optional, boolean)*: Indicates whether the point in time tolerates unavailable shards or shard failures when initially creating the PIT. If `false`, creating a point in time request when a shard is missing or unavailable will throw an exception. If `true`, the point in time will contain all the shards that are available at the time of the request. + +[discrete] +=== ping +Ping the cluster. +Get information about whether the cluster is running. + +https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-cluster[Endpoint documentation] +[source,ts] +---- +client.ping() +---- + +[discrete] +=== put_script +Create or update a script or search template. +Creates or updates a stored script or search template. 
+ +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-put-script[Endpoint documentation] +[source,ts] +---- +client.putScript({ id, script }) +---- +[discrete] +==== Arguments + +* *Request (object):* +** *`id` (string)*: The identifier for the stored script or search template. It must be unique within the cluster. +** *`script` ({ lang, options, source })*: The script or search template, its parameters, and its language. +** *`context` (Optional, string)*: The context in which the script or search template should run. To prevent errors, the API immediately compiles the script or template in this context. +** *`master_timeout` (Optional, string | -1 | 0)*: The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. It can also be set to `-1` to indicate that the request should never timeout. +** *`timeout` (Optional, string | -1 | 0)*: The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. It can also be set to `-1` to indicate that the request should never timeout. + +[discrete] +=== rank_eval +Evaluate ranked search results. + +Evaluate the quality of ranked search results over a set of typical search queries. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-rank-eval[Endpoint documentation] +[source,ts] +---- +client.rankEval({ requests }) +---- +[discrete] +==== Arguments + +* *Request (object):* +** *`requests` ({ id, request, ratings, template_id, params }[])*: A set of typical search requests, together with their provided ratings. +** *`index` (Optional, string | string[])*: A list of data streams, indices, and index aliases used to limit the request. Wildcard (`*`) expressions are supported. To target all data streams and indices in a cluster, omit this parameter or use `_all` or `*`. +** *`metric` (Optional, { precision, recall, mean_reciprocal_rank, dcg, expected_reciprocal_rank })*: Definition of the evaluation metric to calculate. +** *`allow_no_indices` (Optional, boolean)*: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. +** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Whether to expand wildcard expression to concrete indices that are open, closed or both. +** *`ignore_unavailable` (Optional, boolean)*: If `true`, missing or closed indices are not included in the response. +** *`search_type` (Optional, string)*: Search operation type + +[discrete] +=== reindex +Reindex documents. + +Copy documents from a source to a destination. +You can copy all documents to the destination index or reindex a subset of the documents. +The source can be any existing index, alias, or data stream. +The destination must differ from the source. +For example, you cannot reindex a data stream into itself. + +IMPORTANT: Reindex requires `_source` to be enabled for all documents in the source. +The destination should be configured as wanted before calling the reindex API. +Reindex does not copy the settings from the source or its associated template. 
+Mappings, shard counts, and replicas, for example, must be configured ahead of time. + +If the Elasticsearch security features are enabled, you must have the following security privileges: + +* The `read` index privilege for the source data stream, index, or alias. +* The `write` index privilege for the destination data stream, index, or index alias. +* To automatically create a data stream or index with a reindex API request, you must have the `auto_configure`, `create_index`, or `manage` index privilege for the destination data stream, index, or alias. +* If reindexing from a remote cluster, the `source.remote.user` must have the `monitor` cluster privilege and the `read` index privilege for the source data stream, index, or alias. + +If reindexing from a remote cluster, you must explicitly allow the remote host in the `reindex.remote.whitelist` setting. +Automatic data stream creation requires a matching index template with data stream enabled. + +The `dest` element can be configured like the index API to control optimistic concurrency control. +Omitting `version_type` or setting it to `internal` causes Elasticsearch to blindly dump documents into the destination, overwriting any that happen to have the same ID. + +Setting `version_type` to `external` causes Elasticsearch to preserve the `version` from the source, create any documents that are missing, and update any documents that have an older version in the destination than they do in the source. + +Setting `op_type` to `create` causes the reindex API to create only missing documents in the destination. +All existing documents will cause a version conflict. + +IMPORTANT: Because data streams are append-only, any reindex request to a destination data stream must have an `op_type` of `create`. +A reindex can only add new documents to a destination data stream. +It cannot update existing documents in a destination data stream. + +By default, version conflicts abort the reindex process. +To continue reindexing if there are conflicts, set the `conflicts` request body property to `proceed`. +In this case, the response includes a count of the version conflicts that were encountered. +Note that the handling of other error types is unaffected by the `conflicts` property. +Additionally, if you opt to count version conflicts, the operation could attempt to reindex more documents from the source than `max_docs` until it has successfully indexed `max_docs` documents into the target or it has gone through every document in the source query. + +NOTE: The reindex API makes no effort to handle ID collisions. +The last document written will "win" but the order isn't usually predictable so it is not a good idea to rely on this behavior. +Instead, make sure that IDs are unique by using a script. + +**Running reindex asynchronously** + +If the request contains `wait_for_completion=false`, Elasticsearch performs some preflight checks, launches the request, and returns a task you can use to cancel or get the status of the task. +Elasticsearch creates a record of this task as a document at `_tasks/`. + +**Reindex from multiple sources** + +If you have many sources to reindex it is generally better to reindex them one at a time rather than using a glob pattern to pick up multiple sources. +That way you can resume the process if there are any errors by removing the partially completed source and starting over. +It also makes parallelizing the process fairly simple: split the list of sources to reindex and run each list in parallel. 
+ +For example, you can use a bash script like this: + +---- +for index in i1 i2 i3 i4 i5; do + curl -HContent-Type:application/json -XPOST localhost:9200/_reindex?pretty -d'{ + "source": { + "index": "'$index'" + }, + "dest": { + "index": "'$index'-reindexed" + } + }' +done +---- + +**Throttling** + +Set `requests_per_second` to any positive decimal number (`1.4`, `6`, `1000`, for example) to throttle the rate at which reindex issues batches of index operations. +Requests are throttled by padding each batch with a wait time. +To turn off throttling, set `requests_per_second` to `-1`. + +The throttling is done by waiting between batches so that the scroll that reindex uses internally can be given a timeout that takes into account the padding. +The padding time is the difference between the batch size divided by the `requests_per_second` and the time spent writing. +By default the batch size is `1000`, so if `requests_per_second` is set to `500`: + +---- +target_time = 1000 / 500 per second = 2 seconds +wait_time = target_time - write_time = 2 seconds - .5 seconds = 1.5 seconds +---- + +Since the batch is issued as a single bulk request, large batch sizes cause Elasticsearch to create many requests and then wait for a while before starting the next set. +This is "bursty" instead of "smooth". + +**Slicing** + +Reindex supports sliced scroll to parallelize the reindexing process. +This parallelization can improve efficiency and provide a convenient way to break the request down into smaller parts. + +NOTE: Reindexing from remote clusters does not support manual or automatic slicing. + +You can slice a reindex request manually by providing a slice ID and total number of slices to each request. +You can also let reindex automatically parallelize by using sliced scroll to slice on `_id`. +The `slices` parameter specifies the number of slices to use. + +Adding `slices` to the reindex request just automates the manual process, creating sub-requests which means it has some quirks: + +* You can see these requests in the tasks API. These sub-requests are "child" tasks of the task for the request with slices. +* Fetching the status of the task for the request with `slices` only contains the status of completed slices. +* These sub-requests are individually addressable for things like cancellation and rethrottling. +* Rethrottling the request with `slices` will rethrottle the unfinished sub-request proportionally. +* Canceling the request with `slices` will cancel each sub-request. +* Due to the nature of `slices`, each sub-request won't get a perfectly even portion of the documents. All documents will be addressed, but some slices may be larger than others. Expect larger slices to have a more even distribution. +* Parameters like `requests_per_second` and `max_docs` on a request with `slices` are distributed proportionally to each sub-request. Combine that with the previous point about distribution being uneven and you should conclude that using `max_docs` with `slices` might not result in exactly `max_docs` documents being reindexed. +* Each sub-request gets a slightly different snapshot of the source, though these are all taken at approximately the same time. + +If slicing automatically, setting `slices` to `auto` will choose a reasonable number for most indices. +If slicing manually or otherwise tuning automatic slicing, use the following guidelines. + +Query performance is most efficient when the number of slices is equal to the number of shards in the index. 
+If that number is large (for example, `500`), choose a lower number as too many slices will hurt performance.
+Setting slices higher than the number of shards generally does not improve efficiency and adds overhead.
+
+Indexing performance scales linearly across available resources with the number of slices.
+
+Whether query or indexing performance dominates the runtime depends on the documents being reindexed and cluster resources.
+
+**Modify documents during reindexing**
+
+Like `_update_by_query`, reindex operations support a script that modifies the document.
+Unlike `_update_by_query`, the script is allowed to modify the document's metadata.
+
+Just as in `_update_by_query`, you can set `ctx.op` to change the operation that is run on the destination.
+For example, set `ctx.op` to `noop` if your script decides that the document doesn’t have to be indexed in the destination. This "no operation" will be reported in the `noop` counter in the response body.
+Set `ctx.op` to `delete` if your script decides that the document must be deleted from the destination.
+The deletion will be reported in the `deleted` counter in the response body.
+Setting `ctx.op` to anything else will return an error, as will setting any other field in `ctx`.
+
+Think of the possibilities! Just be careful; you are able to change:
+
+* `_id`
+* `_index`
+* `_version`
+* `_routing`
+
+Setting `_version` to `null` or clearing it from the `ctx` map is just like not sending the version in an indexing request.
+It will cause the document to be overwritten in the destination regardless of the version on the target or the version type you use in the reindex API.
+
+**Reindex from remote**
+
+Reindex supports reindexing from a remote Elasticsearch cluster.
+The `host` parameter must contain a scheme, host, port, and optional path.
+The `username` and `password` parameters are optional and, when they are present, the reindex operation will connect to the remote Elasticsearch node using basic authentication.
+Be sure to use HTTPS when using basic authentication or the password will be sent in plain text.
+There is a range of settings available to configure the behavior of the HTTPS connection.
+
+When using Elastic Cloud, it is also possible to authenticate against the remote cluster through the use of a valid API key.
+Remote hosts must be explicitly allowed with the `reindex.remote.whitelist` setting.
+It can be set to a comma-delimited list of allowed remote host and port combinations.
+Scheme is ignored; only the host and port are used.
+For example:
+
+----
+reindex.remote.whitelist: [otherhost:9200, another:9200, 127.0.10.*:9200, localhost:*]
+----
+
+The list of allowed hosts must be configured on any nodes that will coordinate the reindex.
+This feature should work with remote clusters of any version of Elasticsearch.
+This should enable you to upgrade from any version of Elasticsearch to the current version by reindexing from a cluster of the old version.
+
+WARNING: Elasticsearch does not support forward compatibility across major versions.
+For example, you cannot reindex from a 7.x cluster into a 6.x cluster.
+
+To enable queries sent to older versions of Elasticsearch, the `query` parameter is sent directly to the remote host without validation or modification.
+
+NOTE: Reindexing from remote clusters does not support manual or automatic slicing.
+
+Reindexing from a remote server uses an on-heap buffer that defaults to a maximum size of 100mb.
+If the remote index includes very large documents, you'll need to use a smaller batch size.
+It is also possible to set the socket read timeout on the remote connection with the `socket_timeout` field and the connection timeout with the `connect_timeout` field.
+Both default to 30 seconds.
+
+**Configuring SSL parameters**
+
+Reindex from remote supports configurable SSL settings.
+These must be specified in the `elasticsearch.yml` file, with the exception of the secure settings, which you add in the Elasticsearch keystore.
+It is not possible to configure SSL in the body of the reindex request.
+
+https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-reindex[Endpoint documentation]
+[source,ts]
+----
+client.reindex({ dest, source })
+----
+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`dest` ({ index, op_type, pipeline, routing, version_type })*: The destination you are copying to.
+** *`source` ({ index, query, remote, size, slice, sort, _source, runtime_mappings })*: The source you are copying from.
+** *`conflicts` (Optional, Enum("abort" | "proceed"))*: Indicates whether to continue reindexing even when there are conflicts.
+** *`max_docs` (Optional, number)*: The maximum number of documents to reindex. By default, all documents are reindexed. If it is a value less than or equal to `scroll_size`, a scroll will not be used to retrieve the results for the operation. If `conflicts` is set to `proceed`, the reindex operation could attempt to reindex more documents from the source than `max_docs` until it has successfully indexed `max_docs` documents into the target or it has gone through every document in the source query.
+** *`script` (Optional, { source, id, params, lang, options })*: The script to run to update the document source or metadata when reindexing.
+** *`size` (Optional, number)*
+** *`refresh` (Optional, boolean)*: If `true`, the request refreshes affected shards to make this operation visible to search.
+** *`requests_per_second` (Optional, float)*: The throttle for this request in sub-requests per second. By default, there is no throttle.
+** *`scroll` (Optional, string | -1 | 0)*: The period of time that a consistent view of the index should be maintained for scrolled search.
+** *`slices` (Optional, number | Enum("auto"))*: The number of slices this task should be divided into. It defaults to one slice, which means the task isn't sliced into subtasks. Reindex supports sliced scroll to parallelize the reindexing process. This parallelization can improve efficiency and provide a convenient way to break the request down into smaller parts. NOTE: Reindexing from remote clusters does not support manual or automatic slicing. If set to `auto`, Elasticsearch chooses the number of slices to use. This setting will use one slice per shard, up to a certain limit. If there are multiple sources, it will choose the number of slices based on the index or backing index with the smallest number of shards.
+** *`timeout` (Optional, string | -1 | 0)*: The period each indexing operation waits for automatic index creation, dynamic mapping updates, and waiting for active shards. By default, Elasticsearch waits for at least one minute before failing. The actual wait time could be longer, particularly when multiple waits occur.
+** *`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))*: The number of shard copies that must be active before proceeding with the operation.
Set it to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). The default value is one, which means it waits for each primary shard to be active. +** *`wait_for_completion` (Optional, boolean)*: If `true`, the request blocks until the operation is complete. +** *`require_alias` (Optional, boolean)*: If `true`, the destination must be an index alias. + +[discrete] +=== reindex_rethrottle +Throttle a reindex operation. + +Change the number of requests per second for a particular reindex operation. +For example: + +---- +POST _reindex/r1A2WoRbTwKZ516z6NEs5A:36619/_rethrottle?requests_per_second=-1 +---- + +Rethrottling that speeds up the query takes effect immediately. +Rethrottling that slows down the query will take effect after completing the current batch. +This behavior prevents scroll timeouts. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-reindex[Endpoint documentation] +[source,ts] +---- +client.reindexRethrottle({ task_id }) +---- +[discrete] +==== Arguments + +* *Request (object):* +** *`task_id` (string)*: The task identifier, which can be found by using the tasks API. +** *`requests_per_second` (Optional, float)*: The throttle for this request in sub-requests per second. It can be either `-1` to turn off throttling or any decimal number like `1.7` or `12` to throttle to that level. + +[discrete] +=== render_search_template +Render a search template. + +Render a search template as a search request body. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-render-search-template[Endpoint documentation] +[source,ts] +---- +client.renderSearchTemplate({ ... }) +---- +[discrete] +==== Arguments + +* *Request (object):* +** *`id` (Optional, string)*: The ID of the search template to render. If no `source` is specified, this or the `id` request body parameter is required. +** *`file` (Optional, string)* +** *`params` (Optional, Record)*: Key-value pairs used to replace Mustache variables in the template. The key is the variable name. The value is the variable value. +** *`source` (Optional, string)*: An inline search template. It supports the same parameters as the search API's request body. These parameters also support Mustache variables. If no `id` or `` is specified, this parameter is required. + +[discrete] +=== scripts_painless_execute +Run a script. + +Runs a script and returns a result. +Use this API to build and test scripts, such as when defining a script for a runtime field. +This API requires very few dependencies and is especially useful if you don't have permissions to write documents on a cluster. + +The API uses several _contexts_, which control how scripts are run, what variables are available at runtime, and what the return type is. + +Each context requires a script, but additional parameters depend on the context you're using for that script. + +{painless}/painless-execute-api.html[Endpoint documentation] +[source,ts] +---- +client.scriptsPainlessExecute({ ... }) +---- +[discrete] +==== Arguments + +* *Request (object):* +** *`context` (Optional, Enum("painless_test" | "filter" | "score" | "boolean_field" | "date_field" | "double_field" | "geo_point_field" | "ip_field" | "keyword_field" | "long_field" | "composite_field"))*: The context that the script should run in. NOTE: Result ordering in the field contexts is not guaranteed. +** *`context_setup` (Optional, { document, index, query })*: Additional parameters for the `context`. 
+NOTE: This parameter is required for all contexts except `painless_test`, which is the default if no value is provided for `context`.
+** *`script` (Optional, { source, id, params, lang, options })*: The Painless script to run.
+
+[discrete]
+=== scroll
+Run a scrolling search.
+
+IMPORTANT: The scroll API is no longer recommended for deep pagination. If you need to preserve the index state while paging through more than 10,000 hits, use the `search_after` parameter with a point in time (PIT).
+
+The scroll API gets large sets of results from a single scrolling search request.
+To get the necessary scroll ID, submit a search API request that includes an argument for the `scroll` query parameter.
+The `scroll` parameter indicates how long Elasticsearch should retain the search context for the request.
+The search response returns a scroll ID in the `_scroll_id` response body parameter.
+You can then use the scroll ID with the scroll API to retrieve the next batch of results for the request.
+If the Elasticsearch security features are enabled, access to the results of a specific scroll ID is restricted to the user or API key that submitted the search.
+
+You can also use the scroll API to specify a new scroll parameter that extends or shortens the retention period for the search context.
+
+IMPORTANT: Results from a scrolling search reflect the state of the index at the time of the initial search request. Subsequent indexing or document changes only affect later search and scroll requests.
+
+https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-scroll[Endpoint documentation]
+[source,ts]
+----
+client.scroll({ scroll_id })
+----
+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`scroll_id` (string)*: The scroll ID of the search.
+** *`scroll` (Optional, string | -1 | 0)*: The period to retain the search context for scrolling.
+** *`rest_total_hits_as_int` (Optional, boolean)*: If `true`, the API response’s `hits.total` property is returned as an integer. If `false`, the API response’s `hits.total` property is returned as an object.
+
+[discrete]
+=== search
+Run a search.
+
+Get search hits that match the query defined in the request.
+You can provide search queries using the `q` query string parameter or the request body.
+If both are specified, only the query parameter is used.
+
+If the Elasticsearch security features are enabled, you must have the `read` index privilege for the target data stream, index, or alias. For cross-cluster search, refer to the documentation about configuring CCS privileges.
+To search a point in time (PIT) for an alias, you must have the `read` index privilege for the alias's data streams or indices.
+
+**Search slicing**
+
+When paging through a large number of documents, it can be helpful to split the search into multiple slices to consume them independently with the `slice` and `pit` properties.
+By default, the splitting is done first on the shards, then locally on each shard.
+The local splitting partitions the shard into contiguous ranges based on Lucene document IDs.
+
+For instance, if the number of shards is equal to 2 and you request 4 slices, the slices 0 and 2 are assigned to the first shard and the slices 1 and 3 are assigned to the second shard.
+
+IMPORTANT: The same point-in-time ID should be used for all slices.
+If different PIT IDs are used, slices can overlap and miss documents.
+This situation can occur because the splitting criterion is based on Lucene document IDs, which are not stable across changes to the index.
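+
+As a rough sketch, a sliced search over a shared point in time could look like the following from the JavaScript client; the index name, slice count, and `keep_alive` values are illustrative assumptions.
+
+[source,ts]
+----
+// Open a PIT first so that every slice sees the same snapshot of the data.
+const pit = await client.openPointInTime({
+  index: 'my-index-000001',
+  keep_alive: '1m'
+})
+
+// Fetch slice 0 of 2; a second request with `id: 1` covers the remaining documents.
+// The search omits `index` because the PIT already carries it.
+const firstSlice = await client.search({
+  slice: { id: 0, max: 2 },
+  pit: { id: pit.id, keep_alive: '1m' },
+  query: { match_all: {} }
+})
+----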
+ +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search[Endpoint documentation] +[source,ts] +---- +client.search({ ... }) +---- +[discrete] +==== Arguments + +* *Request (object):* +** *`index` (Optional, string | string[])*: A list of data streams, indices, and aliases to search. It supports wildcards (`*`). To search all data streams and indices, omit this parameter or use `*` or `_all`. +** *`aggregations` (Optional, Record)*: Defines the aggregations that are run as part of the search request. +** *`collapse` (Optional, { field, inner_hits, max_concurrent_group_searches, collapse })*: Collapses search results the values of the specified field. +** *`explain` (Optional, boolean)*: If `true`, the request returns detailed information about score computation as part of a hit. +** *`ext` (Optional, Record)*: Configuration of search extensions defined by Elasticsearch plugins. +** *`from` (Optional, number)*: The starting document offset, which must be non-negative. By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters. To page through more hits, use the `search_after` parameter. +** *`highlight` (Optional, { encoder, fields })*: Specifies the highlighter to use for retrieving highlighted snippets from one or more fields in your search results. +** *`track_total_hits` (Optional, boolean | number)*: Number of hits matching the query to count accurately. If `true`, the exact number of hits is returned at the cost of some performance. If `false`, the response does not include the total number of hits matching the query. +** *`indices_boost` (Optional, Record[])*: Boost the `_score` of documents from specified indices. The boost value is the factor by which scores are multiplied. A boost value greater than `1.0` increases the score. A boost value between `0` and `1.0` decreases the score. +** *`docvalue_fields` (Optional, { field, format, include_unmapped }[])*: An array of wildcard (`*`) field patterns. The request returns doc values for field names matching these patterns in the `hits.fields` property of the response. +** *`knn` (Optional, { field, query_vector, query_vector_builder, k, num_candidates, boost, filter, similarity, inner_hits, rescore_vector } | { field, query_vector, query_vector_builder, k, num_candidates, boost, filter, similarity, inner_hits, rescore_vector }[])*: The approximate kNN search to run. +** *`rank` (Optional, { rrf })*: The Reciprocal Rank Fusion (RRF) to use. +** *`min_score` (Optional, number)*: The minimum `_score` for matching documents. Documents with a lower `_score` are not included in the search results. +** *`post_filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Use the `post_filter` parameter to filter search results. The search hits are filtered after the aggregations are calculated. 
A post filter has no impact on the aggregation results. +** *`profile` (Optional, boolean)*: Set to `true` to return detailed timing information about the execution of individual components in a search request. NOTE: This is a debugging tool and adds significant overhead to search execution. +** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: The search definition using the Query DSL. +** *`rescore` (Optional, { window_size, query, learning_to_rank } | { window_size, query, learning_to_rank }[])*: Can be used to improve precision by reordering just the top (for example 100 - 500) documents returned by the `query` and `post_filter` phases. +** *`retriever` (Optional, { standard, knn, rrf, text_similarity_reranker, rule })*: A retriever is a specification to describe top documents returned from a search. A retriever replaces other elements of the search API that also return top documents such as `query` and `knn`. +** *`script_fields` (Optional, Record)*: Retrieve a script evaluation (based on different fields) for each hit. +** *`search_after` (Optional, number | number | string | boolean | null[])*: Used to retrieve the next page of hits using a set of sort values from the previous page. +** *`size` (Optional, number)*: The number of hits to return, which must not be negative. By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters. To page through more hits, use the `search_after` property. +** *`slice` (Optional, { field, id, max })*: Split a scrolled search into multiple slices that can be consumed independently. +** *`sort` (Optional, string | { _score, _doc, _geo_distance, _script } | string | { _score, _doc, _geo_distance, _script }[])*: A list of : pairs. +** *`_source` (Optional, boolean | { excludes, includes })*: The source fields that are returned for matching documents. These fields are returned in the `hits._source` property of the search response. If the `stored_fields` property is specified, the `_source` property defaults to `false`. Otherwise, it defaults to `true`. +** *`fields` (Optional, { field, format, include_unmapped }[])*: An array of wildcard (`*`) field patterns. The request returns values for field names matching these patterns in the `hits.fields` property of the response. +** *`suggest` (Optional, { text })*: Defines a suggester that provides similar looking terms based on a provided text. +** *`terminate_after` (Optional, number)*: The maximum number of documents to collect for each shard. If a query reaches this limit, Elasticsearch terminates the query early. Elasticsearch collects documents before sorting. IMPORTANT: Use with caution. Elasticsearch applies this property to each shard handling the request. When possible, let Elasticsearch perform early termination automatically. 
Avoid specifying this property for requests that target data streams with backing indices across multiple data tiers. If set to `0` (default), the query does not terminate early. +** *`timeout` (Optional, string)*: The period of time to wait for a response from each shard. If no response is received before the timeout expires, the request fails and returns an error. Defaults to no timeout. +** *`track_scores` (Optional, boolean)*: If `true`, calculate and return document scores, even if the scores are not used for sorting. +** *`version` (Optional, boolean)*: If `true`, the request returns the document version as part of a hit. +** *`seq_no_primary_term` (Optional, boolean)*: If `true`, the request returns sequence number and primary term of the last modification of each hit. +** *`stored_fields` (Optional, string | string[])*: A list of stored fields to return as part of a hit. If no fields are specified, no stored fields are included in the response. If this field is specified, the `_source` property defaults to `false`. You can pass `_source: true` to return both source fields and stored fields in the search response. +** *`pit` (Optional, { id, keep_alive })*: Limit the search to a point in time (PIT). If you provide a PIT, you cannot specify an `` in the request path. +** *`runtime_mappings` (Optional, Record)*: One or more runtime fields in the search request. These fields take precedence over mapped fields with the same name. +** *`stats` (Optional, string[])*: The stats groups to associate with the search. Each group maintains a statistics aggregation for its associated searches. You can retrieve these stats using the indices stats API. +** *`allow_no_indices` (Optional, boolean)*: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. +** *`allow_partial_search_results` (Optional, boolean)*: If `true` and there are shard request timeouts or shard failures, the request returns partial results. If `false`, it returns an error with no partial results. To override the default behavior, you can set the `search.default_allow_partial_results` cluster setting to `false`. +** *`analyzer` (Optional, string)*: The analyzer to use for the query string. This parameter can be used only when the `q` query string parameter is specified. +** *`analyze_wildcard` (Optional, boolean)*: If `true`, wildcard and prefix queries are analyzed. This parameter can be used only when the `q` query string parameter is specified. +** *`batched_reduce_size` (Optional, number)*: The number of shard results that should be reduced at once on the coordinating node. If the potential number of shards in the request can be large, this value should be used as a protection mechanism to reduce the memory overhead per search request. +** *`ccs_minimize_roundtrips` (Optional, boolean)*: If `true`, network round-trips between the coordinating node and the remote clusters are minimized when running cross-cluster search (CCS) requests. +** *`default_operator` (Optional, Enum("and" | "or"))*: The default operator for the query string query: `AND` or `OR`. This parameter can be used only when the `q` query string parameter is specified. +** *`df` (Optional, string)*: The field to use as a default when no field prefix is given in the query string. 
This parameter can be used only when the `q` query string parameter is specified. +** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: The type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. It supports a list of values such as `open,hidden`. +** *`ignore_throttled` (Optional, boolean)*: If `true`, concrete, expanded or aliased indices will be ignored when frozen. +** *`ignore_unavailable` (Optional, boolean)*: If `false`, the request returns an error if it targets a missing or closed index. +** *`include_named_queries_score` (Optional, boolean)*: If `true`, the response includes the score contribution from any named queries. This functionality reruns each named query on every hit in a search response. Typically, this adds a small overhead to a request. However, using computationally expensive named queries on a large number of hits may add significant overhead. +** *`lenient` (Optional, boolean)*: If `true`, format-based query failures (such as providing text to a numeric field) in the query string will be ignored. This parameter can be used only when the `q` query string parameter is specified. +** *`max_concurrent_shard_requests` (Optional, number)*: The number of concurrent shard requests per node that the search runs concurrently. This value should be used to limit the impact of the search on the cluster in order to limit the number of concurrent shard requests. +** *`preference` (Optional, string)*: The nodes and shards used for the search. By default, Elasticsearch selects from eligible nodes and shards using adaptive replica selection, accounting for allocation awareness. Valid values are: * `_only_local` to run the search only on shards on the local node. * `_local` to, if possible, run the search on shards on the local node, or if not, select shards using the default method. * `_only_nodes:,` to run the search on only the specified nodes IDs. If suitable shards exist on more than one selected node, use shards on those nodes using the default method. If none of the specified nodes are available, select shards from any available node using the default method. * `_prefer_nodes:,` to if possible, run the search on the specified nodes IDs. If not, select shards using the default method. `_shards:,` to run the search only on the specified shards. You can combine this value with other `preference` values. However, the `_shards` value must come first. For example: `_shards:2,3|_local`. `` (any string that does not start with `_`) to route searches with the same `` to the same shards in the same order. +** *`pre_filter_shard_size` (Optional, number)*: A threshold that enforces a pre-filter roundtrip to prefilter search shards based on query rewriting if the number of shards the search request expands to exceeds the threshold. This filter roundtrip can limit the number of shards significantly if for instance a shard can not match any documents based on its rewrite method (if date filters are mandatory to match but the shard bounds and the query are disjoint). When unspecified, the pre-filter phase is executed if any of these conditions is met: * The request targets more than 128 shards. * The request targets one or more read-only index. * The primary sort of the query targets an indexed field. 
+** *`request_cache` (Optional, boolean)*: If `true`, the caching of search results is enabled for requests where `size` is `0`. It defaults to index-level settings. +** *`routing` (Optional, string)*: A custom value that is used to route operations to a specific shard. +** *`scroll` (Optional, string | -1 | 0)*: The period to retain the search context for scrolling. By default, this value cannot exceed `1d` (24 hours). You can change this limit by using the `search.max_keep_alive` cluster-level setting. +** *`search_type` (Optional, Enum("query_then_fetch" | "dfs_query_then_fetch"))*: Indicates how distributed term frequencies are calculated for relevance scoring. +** *`suggest_field` (Optional, string)*: The field to use for suggestions. +** *`suggest_mode` (Optional, Enum("missing" | "popular" | "always"))*: The suggest mode. This parameter can be used only when the `suggest_field` and `suggest_text` query string parameters are specified. +** *`suggest_size` (Optional, number)*: The number of suggestions to return. This parameter can be used only when the `suggest_field` and `suggest_text` query string parameters are specified. +** *`suggest_text` (Optional, string)*: The source text for which the suggestions should be returned. This parameter can be used only when the `suggest_field` and `suggest_text` query string parameters are specified. +** *`typed_keys` (Optional, boolean)*: If `true`, aggregation and suggester names are prefixed by their respective types in the response. +** *`rest_total_hits_as_int` (Optional, boolean)*: Indicates whether `hits.total` should be rendered as an integer or an object in the REST search response. +** *`_source_excludes` (Optional, string | string[])*: A list of source fields to exclude from the response. You can also use this parameter to exclude fields from the subset specified in the `_source_includes` query parameter. If the `_source` parameter is `false`, this parameter is ignored. +** *`_source_includes` (Optional, string | string[])*: A list of source fields to include in the response. If this parameter is specified, only these source fields are returned. You can exclude fields from this subset using the `_source_excludes` query parameter. If the `_source` parameter is `false`, this parameter is ignored. +** *`q` (Optional, string)*: A query in the Lucene query string syntax. Query parameter searches do not support the full Elasticsearch Query DSL but are handy for testing. IMPORTANT: This parameter overrides the query parameter in the request body. If both parameters are specified, documents matching the query request body parameter are not returned. +** *`force_synthetic_source` (Optional, boolean)*: Should this request force synthetic `_source`? Use this to test if the mapping supports synthetic `_source` and to get a sense of the worst case performance. Fetches with this enabled will be slower than enabling synthetic source natively in the index. + +[discrete] +=== search_mvt +Search a vector tile. + +Search a vector tile for geospatial values. +Before using this API, you should be familiar with the Mapbox vector tile specification. +The API returns results as a binary Mapbox vector tile. + +Internally, Elasticsearch translates a vector tile search API request into a search containing: + +* A `geo_bounding_box` query on the ``. The query uses the `//` tile as a bounding box. +* A `geotile_grid` or `geohex_grid` aggregation on the ``. The `grid_agg` parameter determines the aggregation type. The aggregation uses the `//` tile as a bounding box.
+* Optionally, a `geo_bounds` aggregation on the ``. The search only includes this aggregation if the `exact_bounds` parameter is `true`. +* If the optional parameter `with_labels` is `true`, the internal search will include a dynamic runtime field that calls the `getLabelPosition` function of the geometry doc value. This enables the generation of new point features containing suggested geometry labels, so that, for example, multi-polygons will have only one label. + +For example, Elasticsearch may translate a vector tile search API request with a `grid_agg` argument of `geotile` and an `exact_bounds` argument of `true` into the following search + +---- +GET my-index/_search +{ + "size": 10000, + "query": { + "geo_bounding_box": { + "my-geo-field": { + "top_left": { + "lat": -40.979898069620134, + "lon": -45 + }, + "bottom_right": { + "lat": -66.51326044311186, + "lon": 0 + } + } + } + }, + "aggregations": { + "grid": { + "geotile_grid": { + "field": "my-geo-field", + "precision": 11, + "size": 65536, + "bounds": { + "top_left": { + "lat": -40.979898069620134, + "lon": -45 + }, + "bottom_right": { + "lat": -66.51326044311186, + "lon": 0 + } + } + } + }, + "bounds": { + "geo_bounds": { + "field": "my-geo-field", + "wrap_longitude": false + } + } + } +} +---- + +The API returns results as a binary Mapbox vector tile. +Mapbox vector tiles are encoded as Google Protobufs (PBF). By default, the tile contains three layers: + +* A `hits` layer containing a feature for each `` value matching the `geo_bounding_box` query. +* An `aggs` layer containing a feature for each cell of the `geotile_grid` or `geohex_grid`. The layer only contains features for cells with matching data. +* A meta layer containing: + * A feature containing a bounding box. By default, this is the bounding box of the tile. + * Value ranges for any sub-aggregations on the `geotile_grid` or `geohex_grid`. + * Metadata for the search. + +The API only returns features that can display at its zoom level. +For example, if a polygon feature has no area at its zoom level, the API omits it. +The API returns errors as UTF-8 encoded JSON. + +IMPORTANT: You can specify several options for this API as either a query parameter or request body parameter. +If you specify both parameters, the query parameter takes precedence. + +**Grid precision for geotile** + +For a `grid_agg` of `geotile`, you can use cells in the `aggs` layer as tiles for lower zoom levels. +`grid_precision` represents the additional zoom levels available through these cells. The final precision is computed by as follows: ` + grid_precision`. +For example, if `` is 7 and `grid_precision` is 8, then the `geotile_grid` aggregation will use a precision of 15. +The maximum final precision is 29. +The `grid_precision` also determines the number of cells for the grid as follows: `(2^grid_precision) x (2^grid_precision)`. +For example, a value of 8 divides the tile into a grid of 256 x 256 cells. +The `aggs` layer only contains features for cells with matching data. + +**Grid precision for geohex** + +For a `grid_agg` of `geohex`, Elasticsearch uses `` and `grid_precision` to calculate a final precision as follows: ` + grid_precision`. + +This precision determines the H3 resolution of the hexagonal cells produced by the `geohex` aggregation. +The following table maps the H3 resolution for each precision. +For example, if `` is 3 and `grid_precision` is 3, the precision is 6. +At a precision of 6, hexagonal cells have an H3 resolution of 2. 
+If `` is 3 and `grid_precision` is 4, the precision is 7. +At a precision of 7, hexagonal cells have an H3 resolution of 3. + +| Precision | Unique tile bins | H3 resolution | Unique hex bins | Ratio | +| --------- | ---------------- | ------------- | ----------------| ----- | +| 1 | 4 | 0 | 122 | 30.5 | +| 2 | 16 | 0 | 122 | 7.625 | +| 3 | 64 | 1 | 842 | 13.15625 | +| 4 | 256 | 1 | 842 | 3.2890625 | +| 5 | 1024 | 2 | 5882 | 5.744140625 | +| 6 | 4096 | 2 | 5882 | 1.436035156 | +| 7 | 16384 | 3 | 41162 | 2.512329102 | +| 8 | 65536 | 3 | 41162 | 0.6280822754 | +| 9 | 262144 | 4 | 288122 | 1.099098206 | +| 10 | 1048576 | 4 | 288122 | 0.2747745514 | +| 11 | 4194304 | 5 | 2016842 | 0.4808526039 | +| 12 | 16777216 | 6 | 14117882 | 0.8414913416 | +| 13 | 67108864 | 6 | 14117882 | 0.2103728354 | +| 14 | 268435456 | 7 | 98825162 | 0.3681524172 | +| 15 | 1073741824 | 8 | 691776122 | 0.644266719 | +| 16 | 4294967296 | 8 | 691776122 | 0.1610666797 | +| 17 | 17179869184 | 9 | 4842432842 | 0.2818666889 | +| 18 | 68719476736 | 10 | 33897029882 | 0.4932667053 | +| 19 | 274877906944 | 11 | 237279209162 | 0.8632167343 | +| 20 | 1099511627776 | 11 | 237279209162 | 0.2158041836 | +| 21 | 4398046511104 | 12 | 1660954464122 | 0.3776573213 | +| 22 | 17592186044416 | 13 | 11626681248842 | 0.6609003122 | +| 23 | 70368744177664 | 13 | 11626681248842 | 0.165225078 | +| 24 | 281474976710656 | 14 | 81386768741882 | 0.2891438866 | +| 25 | 1125899906842620 | 15 | 569707381193162 | 0.5060018015 | +| 26 | 4503599627370500 | 15 | 569707381193162 | 0.1265004504 | +| 27 | 18014398509482000 | 15 | 569707381193162 | 0.03162511259 | +| 28 | 72057594037927900 | 15 | 569707381193162 | 0.007906278149 | +| 29 | 288230376151712000 | 15 | 569707381193162 | 0.001976569537 | + +Hexagonal cells don't align perfectly on a vector tile. +Some cells may intersect more than one vector tile. +To compute the H3 resolution for each precision, Elasticsearch compares the average density of hexagonal bins at each resolution with the average density of tile bins at each zoom level. +Elasticsearch uses the H3 resolution that is closest to the corresponding geotile density. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-mvt[Endpoint documentation] +[source,ts] +---- +client.searchMvt({ index, field, zoom, x, y }) +---- +[discrete] +==== Arguments + +* *Request (object):* +** *`index` (string | string[])*: List of data streams, indices, or aliases to search +** *`field` (string)*: Field containing geospatial data to return +** *`zoom` (number)*: Zoom level for the vector tile to search +** *`x` (number)*: X coordinate for the vector tile to search +** *`y` (number)*: Y coordinate for the vector tile to search +** *`aggs` (Optional, Record)*: Sub-aggregations for the geotile_grid. It supports the following aggregation types: - `avg` - `boxplot` - `cardinality` - `extended stats` - `max` - `median absolute deviation` - `min` - `percentile` - `percentile-rank` - `stats` - `sum` - `value count` The aggregation names can't start with `_mvt_`. The `_mvt_` prefix is reserved for internal aggregations. +** *`buffer` (Optional, number)*: The size, in pixels, of a clipping buffer outside the tile. This allows renderers to avoid outline artifacts from geometries that extend past the extent of the tile. +** *`exact_bounds` (Optional, boolean)*: If `false`, the meta layer's feature is the bounding box of the tile. If `true`, the meta layer's feature is a bounding box resulting from a `geo_bounds` aggregation. 
The aggregation runs on values that intersect the `//` tile with `wrap_longitude` set to `false`. The resulting bounding box may be larger than the vector tile. +** *`extent` (Optional, number)*: The size, in pixels, of a side of the tile. Vector tiles are square with equal sides. +** *`fields` (Optional, string | string[])*: The fields to return in the `hits` layer. It supports wildcards (`*`). This parameter does not support fields with array values. Fields with array values may return inconsistent results. +** *`grid_agg` (Optional, Enum("geotile" | "geohex"))*: The aggregation used to create a grid for the `field`. +** *`grid_precision` (Optional, number)*: Additional zoom levels available through the aggs layer. For example, if `` is `7` and `grid_precision` is `8`, you can zoom in up to level 15. Accepts 0-8. If 0, results don't include the aggs layer. +** *`grid_type` (Optional, Enum("grid" | "point" | "centroid"))*: Determines the geometry type for features in the aggs layer. In the aggs layer, each feature represents a `geotile_grid` cell. If `grid, each feature is a polygon of the cells bounding box. If `point`, each feature is a Point that is the centroid of the cell. +** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: The query DSL used to filter documents for the search. +** *`runtime_mappings` (Optional, Record)*: Defines one or more runtime fields in the search request. These fields take precedence over mapped fields with the same name. +** *`size` (Optional, number)*: The maximum number of features to return in the hits layer. Accepts 0-10000. If 0, results don't include the hits layer. +** *`sort` (Optional, string | { _score, _doc, _geo_distance, _script } | string | { _score, _doc, _geo_distance, _script }[])*: Sort the features in the hits layer. By default, the API calculates a bounding box for each feature. It sorts features based on this box's diagonal length, from longest to shortest. +** *`track_total_hits` (Optional, boolean | number)*: The number of hits matching the query to count accurately. If `true`, the exact number of hits is returned at the cost of some performance. If `false`, the response does not include the total number of hits matching the query. +** *`with_labels` (Optional, boolean)*: If `true`, the hits and aggs layers will contain additional point features representing suggested label positions for the original features. * `Point` and `MultiPoint` features will have one of the points selected. * `Polygon` and `MultiPolygon` features will have a single point generated, either the centroid, if it is within the polygon, or another point within the polygon selected from the sorted triangle-tree. * `LineString` features will likewise provide a roughly central point selected from the triangle-tree. * The aggregation results will provide one central point for each aggregation bucket. 
All attributes from the original features will also be copied to the new label features. In addition, the new features will be distinguishable using the tag `_mvt_label_position`. + +[discrete] +=== search_shards +Get the search shards. + +Get the indices and shards that a search request would be run against. +This information can be useful for working out issues or planning optimizations with routing and shard preferences. +When filtered aliases are used, the filter is returned as part of the `indices` section. + +If the Elasticsearch security features are enabled, you must have the `view_index_metadata` or `manage` index privilege for the target data stream, index, or alias. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-shards[Endpoint documentation] +[source,ts] +---- +client.searchShards({ ... }) +---- +[discrete] +==== Arguments + +* *Request (object):* +** *`index` (Optional, string | string[])*: A list of data streams, indices, and aliases to search. It supports wildcards (`*`). To search all data streams and indices, omit this parameter or use `*` or `_all`. +** *`allow_no_indices` (Optional, boolean)*: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. +** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports a list of values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. +** *`ignore_unavailable` (Optional, boolean)*: If `false`, the request returns an error if it targets a missing or closed index. +** *`local` (Optional, boolean)*: If `true`, the request retrieves information from the local node only. +** *`master_timeout` (Optional, string | -1 | 0)*: The period to wait for a connection to the master node. If the master node is not available before the timeout expires, the request fails and returns an error. It can also be set to `-1` to indicate that the request should never time out. +** *`preference` (Optional, string)*: The node or shard the operation should be performed on. It is random by default. +** *`routing` (Optional, string)*: A custom value used to route operations to a specific shard. + +[discrete] +=== search_template +Run a search with a search template. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-template[Endpoint documentation] +[source,ts] +---- +client.searchTemplate({ ... }) +---- +[discrete] +==== Arguments + +* *Request (object):* +** *`index` (Optional, string | string[])*: A list of data streams, indices, and aliases to search. It supports wildcards (`*`). +** *`explain` (Optional, boolean)*: If `true`, returns detailed information about score calculation as part of each hit. If you specify both this and the `explain` query parameter, the API uses only the query parameter. +** *`id` (Optional, string)*: The ID of the search template to use. If no `source` is specified, this parameter is required. +** *`params` (Optional, Record)*: Key-value pairs used to replace Mustache variables in the template.
The key is the variable name. The value is the variable value. +** *`profile` (Optional, boolean)*: If `true`, the query execution is profiled. +** *`source` (Optional, string)*: An inline search template. Supports the same parameters as the search API's request body. It also supports Mustache variables. If no `id` is specified, this parameter is required. +** *`allow_no_indices` (Optional, boolean)*: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. +** *`ccs_minimize_roundtrips` (Optional, boolean)*: If `true`, network round-trips are minimized for cross-cluster search requests. +** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: The type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports a list of values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. +** *`ignore_throttled` (Optional, boolean)*: If `true`, specified concrete, expanded, or aliased indices are not included in the response when throttled. +** *`ignore_unavailable` (Optional, boolean)*: If `false`, the request returns an error if it targets a missing or closed index. +** *`preference` (Optional, string)*: The node or shard the operation should be performed on. It is random by default. +** *`routing` (Optional, string)*: A custom value used to route operations to a specific shard. +** *`scroll` (Optional, string | -1 | 0)*: Specifies how long a consistent view of the index should be maintained for scrolled search. +** *`search_type` (Optional, Enum("query_then_fetch" | "dfs_query_then_fetch"))*: The type of the search operation. +** *`rest_total_hits_as_int` (Optional, boolean)*: If `true`, `hits.total` is rendered as an integer in the response. If `false`, it is rendered as an object. +** *`typed_keys` (Optional, boolean)*: If `true`, the response prefixes aggregation and suggester names with their respective types. + +[discrete] +=== terms_enum +Get terms in an index. + +Discover terms that match a partial string in an index. +This API is designed for low-latency look-ups used in auto-complete scenarios. + +> info +> The terms enum API may return terms from deleted documents. Deleted documents are initially only marked as deleted. It is not until their segments are merged that documents are actually deleted. Until that happens, the terms enum API will return terms from these documents. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-terms-enum[Endpoint documentation] +[source,ts] +---- +client.termsEnum({ index, field }) +---- +[discrete] +==== Arguments + +* *Request (object):* +** *`index` (string)*: A list of data streams, indices, and index aliases to search. Wildcard (`*`) expressions are supported. To search all data streams or indices, omit this parameter or use `*` or `_all`. +** *`field` (string)*: The string to match at the start of indexed terms. If not provided, all terms in the field are considered. +** *`size` (Optional, number)*: The number of matching terms to return. 
+** *`timeout` (Optional, string | -1 | 0)*: The maximum length of time to spend collecting results. If the timeout is exceeded the `complete` flag set to `false` in the response and the results may be partial or empty. +** *`case_insensitive` (Optional, boolean)*: When `true`, the provided search string is matched against index terms without case sensitivity. +** *`index_filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Filter an index shard if the provided query rewrites to `match_none`. +** *`string` (Optional, string)*: The string to match at the start of indexed terms. If it is not provided, all terms in the field are considered. > info > The prefix string cannot be larger than the largest possible keyword value, which is Lucene's term byte-length limit of 32766. +** *`search_after` (Optional, string)*: The string after which terms in the index should be returned. It allows for a form of pagination if the last result from one request is passed as the `search_after` parameter for a subsequent request. + +[discrete] +=== termvectors +Get term vector information. + +Get information and statistics about terms in the fields of a particular document. + +You can retrieve term vectors for documents stored in the index or for artificial documents passed in the body of the request. +You can specify the fields you are interested in through the `fields` parameter or by adding the fields to the request body. +For example: + +---- +GET /my-index-000001/_termvectors/1?fields=message +---- + +Fields can be specified using wildcards, similar to the multi match query. + +Term vectors are real-time by default, not near real-time. +This can be changed by setting `realtime` parameter to `false`. + +You can request three types of values: _term information_, _term statistics_, and _field statistics_. +By default, all term information and field statistics are returned for all fields but term statistics are excluded. + +**Term information** + +* term frequency in the field (always returned) +* term positions (`positions: true`) +* start and end offsets (`offsets: true`) +* term payloads (`payloads: true`), as base64 encoded bytes + +If the requested information wasn't stored in the index, it will be computed on the fly if possible. +Additionally, term vectors could be computed for documents not even existing in the index, but instead provided by the user. + +> warn +> Start and end offsets assume UTF-16 encoding is being used. If you want to use these offsets in order to get the original text that produced this token, you should make sure that the string you are taking a sub-string of is also encoded using UTF-16. + +**Behaviour** + +The term and field statistics are not accurate. +Deleted documents are not taken into account. +The information is only retrieved for the shard the requested document resides in. 
+The term and field statistics are therefore only useful as relative measures whereas the absolute numbers have no meaning in this context. +By default, when requesting term vectors of artificial documents, a shard to get the statistics from is randomly selected. +Use `routing` only to hit a particular shard. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-termvectors[Endpoint documentation] +[source,ts] +---- +client.termvectors({ index }) +---- +[discrete] +==== Arguments + +* *Request (object):* +** *`index` (string)*: The name of the index that contains the document. +** *`id` (Optional, string)*: A unique identifier for the document. +** *`doc` (Optional, object)*: An artificial document (a document not present in the index) for which you want to retrieve term vectors. +** *`filter` (Optional, { max_doc_freq, max_num_terms, max_term_freq, max_word_length, min_doc_freq, min_term_freq, min_word_length })*: Filter terms based on their tf-idf scores. This could be useful in order find out a good characteristic vector of a document. This feature works in a similar manner to the second phase of the More Like This Query. +** *`per_field_analyzer` (Optional, Record)*: Override the default per-field analyzer. This is useful in order to generate term vectors in any fashion, especially when using artificial documents. When providing an analyzer for a field that already stores term vectors, the term vectors will be regenerated. +** *`fields` (Optional, string | string[])*: A list of fields to include in the statistics. It is used as the default list unless a specific field list is provided in the `completion_fields` or `fielddata_fields` parameters. +** *`field_statistics` (Optional, boolean)*: If `true`, the response includes: * The document count (how many documents contain this field). * The sum of document frequencies (the sum of document frequencies for all terms in this field). * The sum of total term frequencies (the sum of total term frequencies of each term in this field). +** *`offsets` (Optional, boolean)*: If `true`, the response includes term offsets. +** *`payloads` (Optional, boolean)*: If `true`, the response includes term payloads. +** *`positions` (Optional, boolean)*: If `true`, the response includes term positions. +** *`term_statistics` (Optional, boolean)*: If `true`, the response includes: * The total term frequency (how often a term occurs in all documents). * The document frequency (the number of documents containing the current term). By default these values are not returned since term statistics can have a serious performance impact. +** *`routing` (Optional, string)*: A custom value that is used to route operations to a specific shard. +** *`version` (Optional, number)*: If `true`, returns the document version as part of a hit. +** *`version_type` (Optional, Enum("internal" | "external" | "external_gte" | "force"))*: The version type. +** *`preference` (Optional, string)*: The node or shard the operation should be performed on. It is random by default. +** *`realtime` (Optional, boolean)*: If true, the request is real-time as opposed to near-real-time. + +[discrete] +=== update +Update a document. + +Update a document by running a script or passing a partial document. + +If the Elasticsearch security features are enabled, you must have the `index` or `write` index privilege for the target index or index alias. + +The script can update, delete, or skip modifying the document. 
+The API also supports passing a partial document, which is merged into the existing document. +To fully replace an existing document, use the index API. +This operation: + +* Gets the document (collocated with the shard) from the index. +* Runs the specified script. +* Indexes the result. + +The document must still be reindexed, but using this API removes some network roundtrips and reduces chances of version conflicts between the GET and the index operation. + +The `_source` field must be enabled to use this API. +In addition to `_source`, you can access the following variables through the `ctx` map: `_index`, `_type`, `_id`, `_version`, `_routing`, and `_now` (the current timestamp). + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-update[Endpoint documentation] +[source,ts] +---- +client.update({ id, index }) +---- +[discrete] +==== Arguments + +* *Request (object):* +** *`id` (string)*: A unique identifier for the document to be updated. +** *`index` (string)*: The name of the target index. By default, the index is created automatically if it doesn't exist. +** *`detect_noop` (Optional, boolean)*: If `true`, the `result` in the response is set to `noop` (no operation) when there are no changes to the document. +** *`doc` (Optional, object)*: A partial update to an existing document. If both `doc` and `script` are specified, `doc` is ignored. +** *`doc_as_upsert` (Optional, boolean)*: If `true`, use the contents of `doc` as the value of `upsert`. NOTE: Using ingest pipelines with `doc_as_upsert` is not supported. +** *`script` (Optional, { source, id, params, lang, options })*: The script to run to update the document. +** *`scripted_upsert` (Optional, boolean)*: If `true`, run the script whether or not the document exists. +** *`_source` (Optional, boolean | { excludes, includes })*: If `false`, turn off source retrieval. You can also specify a list of the fields you want to retrieve. +** *`upsert` (Optional, object)*: If the document does not already exist, the contents of `upsert` are inserted as a new document. If the document exists, the `script` is run. +** *`if_primary_term` (Optional, number)*: Only perform the operation if the document has this primary term. +** *`if_seq_no` (Optional, number)*: Only perform the operation if the document has this sequence number. +** *`include_source_on_error` (Optional, boolean)*: If `true`, the document source is included in the error message in case of parsing errors. +** *`lang` (Optional, string)*: The script language. +** *`refresh` (Optional, Enum(true | false | "wait_for"))*: If `true`, Elasticsearch refreshes the affected shards to make this operation visible to search. If `wait_for`, it waits for a refresh to make this operation visible to search. If `false`, it does nothing with refreshes. +** *`require_alias` (Optional, boolean)*: If `true`, the destination must be an index alias. +** *`retry_on_conflict` (Optional, number)*: The number of times the operation should be retried when a conflict occurs. +** *`routing` (Optional, string)*: A custom value used to route operations to a specific shard. +** *`timeout` (Optional, string | -1 | 0)*: The period to wait for the following operations: dynamic mapping updates and waiting for active shards. Elasticsearch waits for at least the timeout period before failing. The actual wait time could be longer, particularly when multiple waits occur.
+** *`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))*: The number of copies of each shard that must be active before proceeding with the operation. Set to 'all' or any positive integer up to the total number of shards in the index (`number_of_replicas`+1). The default value of `1` means it waits for each primary shard to be active. +** *`_source_excludes` (Optional, string | string[])*: The source fields you want to exclude. +** *`_source_includes` (Optional, string | string[])*: The source fields you want to retrieve. + +[discrete] +=== update_by_query +Update documents. +Updates documents that match the specified query. +If no query is specified, performs an update on every document in the data stream or index without modifying the source, which is useful for picking up mapping changes. + +If the Elasticsearch security features are enabled, you must have the following index privileges for the target data stream, index, or alias: + +* `read` +* `index` or `write` + +You can specify the query criteria in the request URI or the request body using the same syntax as the search API. + +When you submit an update by query request, Elasticsearch gets a snapshot of the data stream or index when it begins processing the request and updates matching documents using internal versioning. +When the versions match, the document is updated and the version number is incremented. +If a document changes between the time that the snapshot is taken and the update operation is processed, it results in a version conflict and the operation fails. +You can opt to count version conflicts instead of halting and returning by setting `conflicts` to `proceed`. +Note that if you opt to count version conflicts, the operation could attempt to update more documents from the source than `max_docs` until it has successfully updated `max_docs` documents or it has gone through every document in the source query. + +NOTE: Documents with a version equal to 0 cannot be updated using update by query because internal versioning does not support 0 as a valid version number. + +While processing an update by query request, Elasticsearch performs multiple search requests sequentially to find all of the matching documents. +A bulk update request is performed for each batch of matching documents. +Any query or update failures cause the update by query request to fail and the failures are shown in the response. +Any update requests that completed successfully still stick, they are not rolled back. + +**Throttling update requests** + +To control the rate at which update by query issues batches of update operations, you can set `requests_per_second` to any positive decimal number. +This pads each batch with a wait time to throttle the rate. +Set `requests_per_second` to `-1` to turn off throttling. + +Throttling uses a wait time between batches so that the internal scroll requests can be given a timeout that takes the request padding into account. +The padding time is the difference between the batch size divided by the `requests_per_second` and the time spent writing. +By default the batch size is 1000, so if `requests_per_second` is set to `500`: + +---- +target_time = 1000 / 500 per second = 2 seconds +wait_time = target_time - write_time = 2 seconds - .5 seconds = 1.5 seconds +---- + +Since the batch is issued as a single _bulk request, large batch sizes cause Elasticsearch to create many requests and wait before starting the next set. +This is "bursty" instead of "smooth". 
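+
+To make the arithmetic above concrete, here is a minimal sketch of a throttled update by query call with the JavaScript client; the index name and query below are placeholders:
+
+[source,ts]
+----
+// A minimal sketch, assuming an index named 'my-index' exists.
+// With the default batch size of 1000 and requests_per_second set to 500,
+// each batch is padded so that it takes roughly two seconds in total.
+const response = await client.updateByQuery({
+  index: 'my-index',
+  conflicts: 'proceed', // count version conflicts instead of aborting
+  requests_per_second: 500, // set to -1 to turn off throttling
+  query: { match: { 'user.id': 'kimchy' } }
+})
+console.log(response.updated, response.version_conflicts)
+----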
+ +**Slicing** + +Update by query supports sliced scroll to parallelize the update process. +This can improve efficiency and provide a convenient way to break the request down into smaller parts. + +Setting `slices` to `auto` chooses a reasonable number for most data streams and indices. +This setting will use one slice per shard, up to a certain limit. +If there are multiple source data streams or indices, it will choose the number of slices based on the index or backing index with the smallest number of shards. + +Adding `slices` to `_update_by_query` just automates the manual process of creating sub-requests, which means it has some quirks: + +* You can see these requests in the tasks APIs. These sub-requests are "child" tasks of the task for the request with slices. +* Fetching the status of the task for the request with `slices` only contains the status of completed slices. +* These sub-requests are individually addressable for things like cancellation and rethrottling. +* Rethrottling the request with `slices` will rethrottle the unfinished sub-request proportionally. +* Canceling the request with slices will cancel each sub-request. +* Due to the nature of slices each sub-request won't get a perfectly even portion of the documents. All documents will be addressed, but some slices may be larger than others. Expect larger slices to have a more even distribution. +* Parameters like `requests_per_second` and `max_docs` on a request with slices are distributed proportionally to each sub-request. Combine that with the point above about distribution being uneven and you should conclude that using `max_docs` with `slices` might not result in exactly `max_docs` documents being updated. +* Each sub-request gets a slightly different snapshot of the source data stream or index though these are all taken at approximately the same time. + +If you're slicing manually or otherwise tuning automatic slicing, keep in mind that: + +* Query performance is most efficient when the number of slices is equal to the number of shards in the index or backing index. If that number is large (for example, 500), choose a lower number as too many slices hurts performance. Setting slices higher than the number of shards generally does not improve efficiency and adds overhead. +* Update performance scales linearly across available resources with the number of slices. + +Whether query or update performance dominates the runtime depends on the documents being reindexed and cluster resources. + +**Update the document source** + +Update by query supports scripts to update the document source. +As with the update API, you can set `ctx.op` to change the operation that is performed. + +Set `ctx.op = "noop"` if your script decides that it doesn't have to make any changes. +The update by query operation skips updating the document and increments the `noop` counter. + +Set `ctx.op = "delete"` if your script decides that the document should be deleted. +The update by query operation deletes the document and increments the `deleted` counter. + +Update by query supports only `index`, `noop`, and `delete`. +Setting `ctx.op` to anything else is an error. +Setting any other field in `ctx` is an error. +This API enables you to only modify the source of matching documents; you cannot move them. 
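+
+As a minimal sketch of the scripting behaviour described above (the index name and the `likes` field are hypothetical), a script can decide per document whether to update or delete it:
+
+[source,ts]
+----
+// A minimal sketch: 'my-index' and the 'likes' field are hypothetical.
+await client.updateByQuery({
+  index: 'my-index',
+  query: { term: { 'user.id': 'kimchy' } },
+  script: {
+    lang: 'painless',
+    // Delete documents that have no likes, otherwise bump the counter.
+    source: "if (ctx._source.likes == 0) { ctx.op = 'delete' } else { ctx._source.likes++ }"
+  }
+})
+----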
+ +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-update-by-query[Endpoint documentation] +[source,ts] +---- +client.updateByQuery({ index }) +---- +[discrete] +==== Arguments + +* *Request (object):* +** *`index` (string | string[])*: A list of data streams, indices, and aliases to search. It supports wildcards (`*`). To search all data streams or indices, omit this parameter or use `*` or `_all`. +** *`max_docs` (Optional, number)*: The maximum number of documents to update. +** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: The documents to update using the Query DSL. +** *`script` (Optional, { source, id, params, lang, options })*: The script to run to update the document source or metadata when updating. +** *`slice` (Optional, { field, id, max })*: Slice the request manually using the provided slice ID and total number of slices. +** *`conflicts` (Optional, Enum("abort" | "proceed"))*: The preferred behavior when update by query hits version conflicts: `abort` or `proceed`. +** *`allow_no_indices` (Optional, boolean)*: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. +** *`analyzer` (Optional, string)*: The analyzer to use for the query string. This parameter can be used only when the `q` query string parameter is specified. +** *`analyze_wildcard` (Optional, boolean)*: If `true`, wildcard and prefix queries are analyzed. This parameter can be used only when the `q` query string parameter is specified. +** *`default_operator` (Optional, Enum("and" | "or"))*: The default operator for query string query: `AND` or `OR`. This parameter can be used only when the `q` query string parameter is specified. +** *`df` (Optional, string)*: The field to use as default where no field prefix is given in the query string. This parameter can be used only when the `q` query string parameter is specified. +** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: The type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. It supports a list of values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. +** *`from` (Optional, number)*: Starting offset (default: 0) +** *`ignore_unavailable` (Optional, boolean)*: If `false`, the request returns an error if it targets a missing or closed index. 
+** *`lenient` (Optional, boolean)*: If `true`, format-based query failures (such as providing text to a numeric field) in the query string will be ignored. This parameter can be used only when the `q` query string parameter is specified. +** *`pipeline` (Optional, string)*: The ID of the pipeline to use to preprocess incoming documents. If the index has a default ingest pipeline specified, then setting the value to `_none` disables the default ingest pipeline for this request. If a final pipeline is configured it will always run, regardless of the value of this parameter. +** *`preference` (Optional, string)*: The node or shard the operation should be performed on. It is random by default. +** *`q` (Optional, string)*: A query in the Lucene query string syntax. +** *`refresh` (Optional, boolean)*: If `true`, Elasticsearch refreshes affected shards to make the operation visible to search after the request completes. This is different than the update API's `refresh` parameter, which causes just the shard that received the request to be refreshed. +** *`request_cache` (Optional, boolean)*: If `true`, the request cache is used for this request. It defaults to the index-level setting. +** *`requests_per_second` (Optional, float)*: The throttle for this request in sub-requests per second. +** *`routing` (Optional, string)*: A custom value used to route operations to a specific shard. +** *`scroll` (Optional, string | -1 | 0)*: The period to retain the search context for scrolling. +** *`scroll_size` (Optional, number)*: The size of the scroll request that powers the operation. +** *`search_timeout` (Optional, string | -1 | 0)*: An explicit timeout for each search request. By default, there is no timeout. +** *`search_type` (Optional, Enum("query_then_fetch" | "dfs_query_then_fetch"))*: The type of the search operation. Available options include `query_then_fetch` and `dfs_query_then_fetch`. +** *`slices` (Optional, number | Enum("auto"))*: The number of slices this task should be divided into. +** *`sort` (Optional, string[])*: A list of : pairs. +** *`stats` (Optional, string[])*: The specific `tag` of the request for logging and statistical purposes. +** *`terminate_after` (Optional, number)*: The maximum number of documents to collect for each shard. If a query reaches this limit, Elasticsearch terminates the query early. Elasticsearch collects documents before sorting. IMPORTANT: Use with caution. Elasticsearch applies this parameter to each shard handling the request. When possible, let Elasticsearch perform early termination automatically. Avoid specifying this parameter for requests that target data streams with backing indices across multiple data tiers. +** *`timeout` (Optional, string | -1 | 0)*: The period each update request waits for the following operations: dynamic mapping updates, waiting for active shards. By default, it is one minute. This guarantees Elasticsearch waits for at least the timeout before failing. The actual wait time could be longer, particularly when multiple waits occur. +** *`version` (Optional, boolean)*: If `true`, returns the document version as part of a hit. +** *`version_type` (Optional, boolean)*: Should the document increment the version number (internal) on hit or not (reindex) +** *`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))*: The number of shard copies that must be active before proceeding with the operation. Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). 
The `timeout` parameter controls how long each write request waits for unavailable shards to become available. Both work exactly the way they work in the bulk API. +** *`wait_for_completion` (Optional, boolean)*: If `true`, the request blocks until the operation is complete. If `false`, Elasticsearch performs some preflight checks, launches the request, and returns a task ID that you can use to cancel or get the status of the task. Elasticsearch creates a record of this task as a document at `.tasks/task/${taskId}`. + +[discrete] +=== update_by_query_rethrottle +Throttle an update by query operation. + +Change the number of requests per second for a particular update by query operation. +Rethrottling that speeds up the query takes effect immediately but rethrottling that slows down the query takes effect after completing the current batch to prevent scroll timeouts. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-update-by-query-rethrottle[Endpoint documentation] +[source,ts] +---- +client.updateByQueryRethrottle({ task_id }) +---- +[discrete] +==== Arguments + +* *Request (object):* +** *`task_id` (string)*: The ID for the task. +** *`requests_per_second` (Optional, float)*: The throttle for this request in sub-requests per second. To turn off throttling, set it to `-1`. + +[discrete] +=== async_search +[discrete] +==== delete +Delete an async search. + +If the asynchronous search is still running, it is cancelled. +Otherwise, the saved search results are deleted. +If the Elasticsearch security features are enabled, the deletion of a specific async search is restricted to: the authenticated user that submitted the original search request; users that have the `cancel_task` cluster privilege. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-async-search-submit[Endpoint documentation] +[source,ts] +---- +client.asyncSearch.delete({ id }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`id` (string)*: A unique identifier for the async search. + +[discrete] +==== get +Get async search results. + +Retrieve the results of a previously submitted asynchronous search request. +If the Elasticsearch security features are enabled, access to the results of a specific async search is restricted to the user or API key that submitted it. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-async-search-submit[Endpoint documentation] +[source,ts] +---- +client.asyncSearch.get({ id }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`id` (string)*: A unique identifier for the async search. +** *`keep_alive` (Optional, string | -1 | 0)*: The length of time that the async search should be available in the cluster. +When not specified, the `keep_alive` set with the corresponding submit async request will be used. +Otherwise, it is possible to override the value and extend the validity of the request. +When this period expires, the search, if still running, is cancelled. +If the search is completed, its saved results are deleted. +** *`typed_keys` (Optional, boolean)*: Specify whether aggregation and suggester names should be prefixed by their respective types in the response. +** *`wait_for_completion_timeout` (Optional, string | -1 | 0)*: Specifies how long to wait for the search to complete, up to the provided timeout. +Final results will be returned if available before the timeout expires, otherwise the currently available results will be returned once the timeout expires.
+By default no timeout is set meaning that the currently available results will be returned without any additional wait. + +[discrete] +==== status +Get the async search status. + +Get the status of a previously submitted async search request given its identifier, without retrieving search results. +If the Elasticsearch security features are enabled, the access to the status of a specific async search is restricted to: + +* The user or API key that submitted the original async search request. +* Users that have the `monitor` cluster privilege or greater privileges. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-async-search-submit[Endpoint documentation] +[source,ts] +---- +client.asyncSearch.status({ id }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`id` (string)*: A unique identifier for the async search. +** *`keep_alive` (Optional, string | -1 | 0)*: The length of time that the async search needs to be available. +Ongoing async searches and any saved search results are deleted after this period. + +[discrete] +==== submit +Run an async search. + +When the primary sort of the results is an indexed field, shards get sorted based on minimum and maximum value that they hold for that field. Partial results become available following the sort criteria that was requested. + +Warning: Asynchronous search does not support scroll or search requests that include only the suggest section. + +By default, Elasticsearch does not allow you to store an async search response larger than 10Mb and an attempt to do this results in an error. +The maximum allowed size for a stored async search response can be set by changing the `search.max_async_search_response_size` cluster level setting. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-async-search-submit[Endpoint documentation] +[source,ts] +---- +client.asyncSearch.submit({ ... }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`index` (Optional, string | string[])*: A list of index names to search; use `_all` or empty string to perform the operation on all indices +** *`aggregations` (Optional, Record)* +** *`collapse` (Optional, { field, inner_hits, max_concurrent_group_searches, collapse })* +** *`explain` (Optional, boolean)*: If true, returns detailed information about score computation as part of a hit. +** *`ext` (Optional, Record)*: Configuration of search extensions defined by Elasticsearch plugins. +** *`from` (Optional, number)*: Starting document offset. By default, you cannot page through more than 10,000 +hits using the from and size parameters. To page through more hits, use the +search_after parameter. +** *`highlight` (Optional, { encoder, fields })* +** *`track_total_hits` (Optional, boolean | number)*: Number of hits matching the query to count accurately. If true, the exact +number of hits is returned at the cost of some performance. If false, the +response does not include the total number of hits matching the query. +Defaults to 10,000 hits. +** *`indices_boost` (Optional, Record[])*: Boosts the _score of documents from specified indices. +** *`docvalue_fields` (Optional, { field, format, include_unmapped }[])*: Array of wildcard (*) patterns. The request returns doc values for field +names matching these patterns in the hits.fields property of the response. 
+** *`knn` (Optional, { field, query_vector, query_vector_builder, k, num_candidates, boost, filter, similarity, inner_hits, rescore_vector } | { field, query_vector, query_vector_builder, k, num_candidates, boost, filter, similarity, inner_hits, rescore_vector }[])*: Defines the approximate kNN search to run. +** *`min_score` (Optional, number)*: Minimum _score for matching documents. Documents with a lower _score are +not included in the search results. +** *`post_filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })* +** *`profile` (Optional, boolean)* +** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Defines the search definition using the Query DSL. +** *`rescore` (Optional, { window_size, query, learning_to_rank } | { window_size, query, learning_to_rank }[])* +** *`script_fields` (Optional, Record)*: Retrieve a script evaluation (based on different fields) for each hit. +** *`search_after` (Optional, number | number | string | boolean | null[])* +** *`size` (Optional, number)*: The number of hits to return. By default, you cannot page through more +than 10,000 hits using the from and size parameters. To page through more +hits, use the search_after parameter. +** *`slice` (Optional, { field, id, max })* +** *`sort` (Optional, string | { _score, _doc, _geo_distance, _script } | string | { _score, _doc, _geo_distance, _script }[])* +** *`_source` (Optional, boolean | { excludes, includes })*: Indicates which source fields are returned for matching documents. These +fields are returned in the hits._source property of the search response. +** *`fields` (Optional, { field, format, include_unmapped }[])*: Array of wildcard (*) patterns. The request returns values for field names +matching these patterns in the hits.fields property of the response. +** *`suggest` (Optional, { text })* +** *`terminate_after` (Optional, number)*: Maximum number of documents to collect for each shard. If a query reaches this +limit, Elasticsearch terminates the query early. Elasticsearch collects documents +before sorting. Defaults to 0, which does not terminate query execution early. 
+** *`timeout` (Optional, string)*: Specifies the period of time to wait for a response from each shard. If no response +is received before the timeout expires, the request fails and returns an error. +Defaults to no timeout. +** *`track_scores` (Optional, boolean)*: If true, calculate and return document scores, even if the scores are not used for sorting. +** *`version` (Optional, boolean)*: If true, returns document version as part of a hit. +** *`seq_no_primary_term` (Optional, boolean)*: If true, returns sequence number and primary term of the last modification +of each hit. See Optimistic concurrency control. +** *`stored_fields` (Optional, string | string[])*: List of stored fields to return as part of a hit. If no fields are specified, +no stored fields are included in the response. If this field is specified, the _source +parameter defaults to false. You can pass _source: true to return both source fields +and stored fields in the search response. +** *`pit` (Optional, { id, keep_alive })*: Limits the search to a point in time (PIT). If you provide a PIT, you +cannot specify an index in the request path. +** *`runtime_mappings` (Optional, Record)*: Defines one or more runtime fields in the search request. These fields take +precedence over mapped fields with the same name. +** *`stats` (Optional, string[])*: Stats groups to associate with the search. Each group maintains a statistics +aggregation for its associated searches. You can retrieve these stats using +the indices stats API. +** *`wait_for_completion_timeout` (Optional, string | -1 | 0)*: Blocks and waits until the search is completed up to a certain timeout. +When the async search completes within the timeout, the response won’t include the ID as the results are not stored in the cluster. +** *`keep_alive` (Optional, string | -1 | 0)*: Specifies how long the async search needs to be available. +Ongoing async searches and any saved search results are deleted after this period. +** *`keep_on_completion` (Optional, boolean)*: If `true`, results are stored for later retrieval when the search completes within the `wait_for_completion_timeout`. +** *`allow_no_indices` (Optional, boolean)*: Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified) +** *`allow_partial_search_results` (Optional, boolean)*: Indicate if an error should be returned if there is a partial search failure or timeout +** *`analyzer` (Optional, string)*: The analyzer to use for the query string +** *`analyze_wildcard` (Optional, boolean)*: Specify whether wildcard and prefix queries should be analyzed (default: false) +** *`batched_reduce_size` (Optional, number)*: Affects how often partial results become available, which happens whenever shard results are reduced. +A partial reduction is performed every time the coordinating node has received a certain number of new shard responses (5 by default). +** *`ccs_minimize_roundtrips` (Optional, boolean)*: The default value is the only supported value. +** *`default_operator` (Optional, Enum("and" | "or"))*: The default operator for query string query (AND or OR) +** *`df` (Optional, string)*: The field to use as default where no field prefix is given in the query string +** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Whether to expand wildcard expression to concrete indices that are open, closed or both.
+** *`ignore_throttled` (Optional, boolean)*: Whether specified concrete, expanded or aliased indices should be ignored when throttled +** *`ignore_unavailable` (Optional, boolean)*: Whether specified concrete indices should be ignored when unavailable (missing or closed) +** *`lenient` (Optional, boolean)*: Specify whether format-based query failures (such as providing text to a numeric field) should be ignored +** *`max_concurrent_shard_requests` (Optional, number)*: The number of concurrent shard requests per node this search executes concurrently. This value should be used to limit the impact of the search on the cluster in order to limit the number of concurrent shard requests +** *`preference` (Optional, string)*: Specify the node or shard the operation should be performed on (default: random) +** *`request_cache` (Optional, boolean)*: Specify if request cache should be used for this request or not, defaults to true +** *`routing` (Optional, string)*: A list of specific routing values +** *`search_type` (Optional, Enum("query_then_fetch" | "dfs_query_then_fetch"))*: Search operation type +** *`suggest_field` (Optional, string)*: Specifies which field to use for suggestions. +** *`suggest_mode` (Optional, Enum("missing" | "popular" | "always"))*: Specify suggest mode +** *`suggest_size` (Optional, number)*: How many suggestions to return in response +** *`suggest_text` (Optional, string)*: The source text for which the suggestions should be returned. +** *`typed_keys` (Optional, boolean)*: Specify whether aggregation and suggester names should be prefixed by their respective types in the response +** *`rest_total_hits_as_int` (Optional, boolean)*: Indicates whether hits.total should be rendered as an integer or an object in the rest search response +** *`_source_excludes` (Optional, string | string[])*: A list of fields to exclude from the returned _source field +** *`_source_includes` (Optional, string | string[])*: A list of fields to extract and return from the _source field +** *`q` (Optional, string)*: Query in the Lucene query string syntax + +[discrete] +=== autoscaling +[discrete] +==== delete_autoscaling_policy +Delete an autoscaling policy. + +NOTE: This feature is designed for indirect use by Elasticsearch Service, Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not supported. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-autoscaling-delete-autoscaling-policy[Endpoint documentation] +[source,ts] +---- +client.autoscaling.deleteAutoscalingPolicy({ name }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`name` (string)*: the name of the autoscaling policy +** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. +** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. + +[discrete] +==== get_autoscaling_capacity +Get the autoscaling capacity. + +NOTE: This feature is designed for indirect use by Elasticsearch Service, Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not supported. + +This API gets the current autoscaling capacity based on the configured autoscaling policy. +It will return information to size the cluster appropriately to the current workload. 
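+
+For example, a minimal sketch of reading the reported capacity from the client might look like the following (the `policies` map in the response is an assumption here; `required_capacity` is described below):
+[source,ts]
+----
+// Sketch only: fetch the current autoscaling capacity and log the
+// required capacity reported for each configured policy.
+const capacity = await client.autoscaling.getAutoscalingCapacity()
+for (const [name, policy] of Object.entries(capacity.policies)) {
+  console.log(name, policy.required_capacity)
+}
+----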
+ +The `required_capacity` is calculated as the maximum of the `required_capacity` result of all individual deciders that are enabled for the policy. + +The operator should verify that the `current_nodes` match the operator’s knowledge of the cluster to avoid making autoscaling decisions based on stale or incomplete information. + +The response contains decider-specific information you can use to diagnose how and why autoscaling determined a certain capacity was required. +This information is provided for diagnosis only. +Do not use this information to make autoscaling decisions. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-autoscaling-get-autoscaling-capacity[Endpoint documentation] +[source,ts] +---- +client.autoscaling.getAutoscalingCapacity({ ... }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. + +[discrete] +==== get_autoscaling_policy +Get an autoscaling policy. + +NOTE: This feature is designed for indirect use by Elasticsearch Service, Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not supported. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-autoscaling-get-autoscaling-capacity[Endpoint documentation] +[source,ts] +---- +client.autoscaling.getAutoscalingPolicy({ name }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`name` (string)*: the name of the autoscaling policy +** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. + +[discrete] +==== put_autoscaling_policy +Create or update an autoscaling policy. + +NOTE: This feature is designed for indirect use by Elasticsearch Service, Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not supported. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-autoscaling-put-autoscaling-policy[Endpoint documentation] +[source,ts] +---- +client.autoscaling.putAutoscalingPolicy({ name }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`name` (string)*: the name of the autoscaling policy +** *`policy` (Optional, { roles, deciders })* +** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. +** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. + +[discrete] +=== cat +[discrete] +==== aliases +Get aliases. + +Get the cluster's index aliases, including filter and routing information. +This API does not return data stream aliases. + +IMPORTANT: CAT APIs are only intended for human consumption using the command line or the Kibana console. They are not intended for use by applications. For application consumption, use the aliases API. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-aliases[Endpoint documentation] +[source,ts] +---- +client.cat.aliases({ ... }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`name` (Optional, string | string[])*: A list of aliases to retrieve. Supports wildcards (`*`). 
To retrieve all aliases, omit this parameter or use `*` or `_all`. +** *`h` (Optional, string | string[])*: List of columns to appear in the response. Supports simple wildcards. +** *`s` (Optional, string | string[])*: List of columns that determine how the table should be sorted. +Sorting defaults to ascending and can be changed by setting `:asc` +or `:desc` as a suffix to the column name. +** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: The type of index that wildcard patterns can match. +If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. +It supports a list of values, such as `open,hidden`. +** *`master_timeout` (Optional, string | -1 | 0)*: The period to wait for a connection to the master node. +If the master node is not available before the timeout expires, the request fails and returns an error. +To indicate that the request should never time out, you can set it to `-1`. + +[discrete] +==== allocation +Get shard allocation information. + +Get a snapshot of the number of shards allocated to each data node and their disk space. + +IMPORTANT: CAT APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-allocation[Endpoint documentation] +[source,ts] +---- +client.cat.allocation({ ... }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`node_id` (Optional, string | string[])*: A list of node identifiers or names used to limit the returned information. +** *`bytes` (Optional, Enum("b" | "kb" | "mb" | "gb" | "tb" | "pb"))*: The unit used to display byte values. +** *`h` (Optional, string | string[])*: List of columns to appear in the response. Supports simple wildcards. +** *`s` (Optional, string | string[])*: List of columns that determine how the table should be sorted. +Sorting defaults to ascending and can be changed by setting `:asc` +or `:desc` as a suffix to the column name. +** *`local` (Optional, boolean)*: If `true`, the request computes the list of selected nodes from the +local cluster state. If `false` the list of selected nodes are computed +from the cluster state of the master node. In both cases the coordinating +node will send requests for further information to each selected node. +** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. + +[discrete] +==== component_templates +Get component templates. + +Get information about component templates in a cluster. +Component templates are building blocks for constructing index templates that specify index mappings, settings, and aliases. + +IMPORTANT: CAT APIs are only intended for human consumption using the command line or Kibana console. +They are not intended for use by applications. For application consumption, use the get component template API. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-component-templates[Endpoint documentation] +[source,ts] +---- +client.cat.componentTemplates({ ... }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`name` (Optional, string)*: The name of the component template. +It accepts wildcard expressions. +If it is omitted, all component templates are returned. +** *`h` (Optional, string | string[])*: List of columns to appear in the response.
Supports simple wildcards. +** *`s` (Optional, string | string[])*: List of columns that determine how the table should be sorted. +Sorting defaults to ascending and can be changed by setting `:asc` +or `:desc` as a suffix to the column name. +** *`local` (Optional, boolean)*: If `true`, the request computes the list of selected nodes from the +local cluster state. If `false` the list of selected nodes are computed +from the cluster state of the master node. In both cases the coordinating +node will send requests for further information to each selected node. +** *`master_timeout` (Optional, string | -1 | 0)*: The period to wait for a connection to the master node. + +[discrete] +==== count +Get a document count. + +Get quick access to a document count for a data stream, an index, or an entire cluster. +The document count only includes live documents, not deleted documents which have not yet been removed by the merge process. + +IMPORTANT: CAT APIs are only intended for human consumption using the command line or Kibana console. +They are not intended for use by applications. For application consumption, use the count API. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-count[Endpoint documentation] +[source,ts] +---- +client.cat.count({ ... }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`index` (Optional, string | string[])*: A list of data streams, indices, and aliases used to limit the request. +It supports wildcards (`*`). +To target all data streams and indices, omit this parameter or use `*` or `_all`. +** *`h` (Optional, string | string[])*: List of columns to appear in the response. Supports simple wildcards. +** *`s` (Optional, string | string[])*: List of columns that determine how the table should be sorted. +Sorting defaults to ascending and can be changed by setting `:asc` +or `:desc` as a suffix to the column name. + +[discrete] +==== fielddata +Get field data cache information. + +Get the amount of heap memory currently used by the field data cache on every data node in the cluster. + +IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. +They are not intended for use by applications. For application consumption, use the nodes stats API. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-fielddata[Endpoint documentation] +[source,ts] +---- +client.cat.fielddata({ ... }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`fields` (Optional, string | string[])*: List of fields used to limit returned information. +To retrieve all fields, omit this parameter. +** *`bytes` (Optional, Enum("b" | "kb" | "mb" | "gb" | "tb" | "pb"))*: The unit used to display byte values. +** *`h` (Optional, string | string[])*: List of columns to appear in the response. Supports simple wildcards. +** *`s` (Optional, string | string[])*: List of columns that determine how the table should be sorted. +Sorting defaults to ascending and can be changed by setting `:asc` +or `:desc` as a suffix to the column name. + +[discrete] +==== health +Get the cluster health status. + +IMPORTANT: CAT APIs are only intended for human consumption using the command line or Kibana console. +They are not intended for use by applications. For application consumption, use the cluster health API. +This API is often used to check malfunctioning clusters. 
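+
+For example, a minimal scripted check using the `ts` and `time` parameters documented below might look like this (a sketch only):
+[source,ts]
+----
+// Sketch only: print the cluster health line with second-resolution time
+// values and both timestamp formats included.
+const health = await client.cat.health({ ts: true, time: 's' })
+console.log(health)
+----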
+To help you track cluster health alongside log files and alerting systems, the API returns timestamps in two formats: +`HH:MM:SS`, which is human-readable but includes no date information; +`Unix epoch time`, which is machine-sortable and includes date information. +The latter format is useful for cluster recoveries that take multiple days. +You can use the cat health API to verify cluster health across multiple nodes. +You also can use the API to track the recovery of a large cluster over a longer period of time. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-health[Endpoint documentation] +[source,ts] +---- +client.cat.health({ ... }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`time` (Optional, Enum("nanos" | "micros" | "ms" | "s" | "m" | "h" | "d"))*: The unit used to display time values. +** *`ts` (Optional, boolean)*: If true, returns `HH:MM:SS` and Unix epoch timestamps. +** *`h` (Optional, string | string[])*: List of columns to appear in the response. Supports simple wildcards. +** *`s` (Optional, string | string[])*: List of columns that determine how the table should be sorted. +Sorting defaults to ascending and can be changed by setting `:asc` +or `:desc` as a suffix to the column name. + +[discrete] +==== help +Get CAT help. + +Get help for the CAT APIs. + +https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-cat[Endpoint documentation] +[source,ts] +---- +client.cat.help() +---- + + +[discrete] +==== indices +Get index information. + +Get high-level information about indices in a cluster, including backing indices for data streams. + +Use this request to get the following information for each index in a cluster: +- shard count +- document count +- deleted document count +- primary store size +- total store size of all shards, including shard replicas + +These metrics are retrieved directly from Lucene, which Elasticsearch uses internally to power indexing and search. As a result, all document counts include hidden nested documents. +To get an accurate count of Elasticsearch documents, use the cat count or count APIs. + +CAT APIs are only intended for human consumption using the command line or Kibana console. +They are not intended for use by applications. For application consumption, use an index endpoint. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-indices[Endpoint documentation] +[source,ts] +---- +client.cat.indices({ ... }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`index` (Optional, string | string[])*: List of data streams, indices, and aliases used to limit the request. +Supports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`. +** *`bytes` (Optional, Enum("b" | "kb" | "mb" | "gb" | "tb" | "pb"))*: The unit used to display byte values. +** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: The type of index that wildcard patterns can match. +** *`health` (Optional, Enum("green" | "yellow" | "red"))*: The health status used to limit returned indices. By default, the response includes indices of any health status. +** *`include_unloaded_segments` (Optional, boolean)*: If true, the response includes information from segments that are not loaded into memory. +** *`pri` (Optional, boolean)*: If true, the response only includes information from primary shards. 
+** *`time` (Optional, Enum("nanos" | "micros" | "ms" | "s" | "m" | "h" | "d"))*: The unit used to display time values. +** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. +** *`h` (Optional, string | string[])*: List of columns to appear in the response. Supports simple wildcards. +** *`s` (Optional, string | string[])*: List of columns that determine how the table should be sorted. +Sorting defaults to ascending and can be changed by setting `:asc` +or `:desc` as a suffix to the column name. + +[discrete] +==== master +Get master node information. + +Get information about the master node, including the ID, bound IP address, and name. + +IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the nodes info API. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-master[Endpoint documentation] +[source,ts] +---- +client.cat.master({ ... }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`h` (Optional, string | string[])*: List of columns to appear in the response. Supports simple wildcards. +** *`s` (Optional, string | string[])*: List of columns that determine how the table should be sorted. +Sorting defaults to ascending and can be changed by setting `:asc` +or `:desc` as a suffix to the column name. +** *`local` (Optional, boolean)*: If `true`, the request computes the list of selected nodes from the +local cluster state. If `false` the list of selected nodes are computed +from the cluster state of the master node. In both cases the coordinating +node will send requests for further information to each selected node. +** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. + +[discrete] +==== ml_data_frame_analytics +Get data frame analytics jobs. + +Get configuration and usage information about data frame analytics jobs. + +IMPORTANT: CAT APIs are only intended for human consumption using the Kibana +console or command line. They are not intended for use by applications. For +application consumption, use the get data frame analytics jobs statistics API. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-ml-data-frame-analytics[Endpoint documentation] +[source,ts] +---- +client.cat.mlDataFrameAnalytics({ ... }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`id` (Optional, string)*: The ID of the data frame analytics to fetch +** *`allow_no_match` (Optional, boolean)*: Whether to ignore if a wildcard expression matches no configs. (This includes `_all` string or when no configs have been specified) +** *`bytes` (Optional, Enum("b" | "kb" | "mb" | "gb" | "tb" | "pb"))*: The unit in which to display byte values +** *`h` (Optional, Enum("assignment_explanation" | "create_time" | "description" | "dest_index" | "failure_reason" | "id" | "model_memory_limit" | "node.address" | "node.ephemeral_id" | "node.id" | "node.name" | "progress" | "source_index" | "state" | "type" | "version") | Enum("assignment_explanation" | "create_time" | "description" | "dest_index" | "failure_reason" | "id" | "model_memory_limit" | "node.address" | "node.ephemeral_id" | "node.id" | "node.name" | "progress" | "source_index" | "state" | "type" | "version")[])*: List of column names to display. 
+** *`s` (Optional, Enum("assignment_explanation" | "create_time" | "description" | "dest_index" | "failure_reason" | "id" | "model_memory_limit" | "node.address" | "node.ephemeral_id" | "node.id" | "node.name" | "progress" | "source_index" | "state" | "type" | "version") | Enum("assignment_explanation" | "create_time" | "description" | "dest_index" | "failure_reason" | "id" | "model_memory_limit" | "node.address" | "node.ephemeral_id" | "node.id" | "node.name" | "progress" | "source_index" | "state" | "type" | "version")[])*: List of column names or column aliases used to sort the +response. +** *`time` (Optional, Enum("nanos" | "micros" | "ms" | "s" | "m" | "h" | "d"))*: Unit used to display time values. + +[discrete] +==== ml_datafeeds +Get datafeeds. + +Get configuration and usage information about datafeeds. +This API returns a maximum of 10,000 datafeeds. +If the Elasticsearch security features are enabled, you must have `monitor_ml`, `monitor`, `manage_ml`, or `manage` +cluster privileges to use this API. + +IMPORTANT: CAT APIs are only intended for human consumption using the Kibana +console or command line. They are not intended for use by applications. For +application consumption, use the get datafeed statistics API. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-ml-datafeeds[Endpoint documentation] +[source,ts] +---- +client.cat.mlDatafeeds({ ... }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`datafeed_id` (Optional, string)*: A numerical character string that uniquely identifies the datafeed. +** *`allow_no_match` (Optional, boolean)*: Specifies what to do when the request: + +* Contains wildcard expressions and there are no datafeeds that match. +* Contains the `_all` string or no identifiers and there are no matches. +* Contains wildcard expressions and there are only partial matches. + +If `true`, the API returns an empty datafeeds array when there are no matches and the subset of results when +there are partial matches. If `false`, the API returns a 404 status code when there are no matches or only +partial matches. +** *`h` (Optional, Enum("ae" | "bc" | "id" | "na" | "ne" | "ni" | "nn" | "sba" | "sc" | "seah" | "st" | "s") | Enum("ae" | "bc" | "id" | "na" | "ne" | "ni" | "nn" | "sba" | "sc" | "seah" | "st" | "s")[])*: List of column names to display. +** *`s` (Optional, Enum("ae" | "bc" | "id" | "na" | "ne" | "ni" | "nn" | "sba" | "sc" | "seah" | "st" | "s") | Enum("ae" | "bc" | "id" | "na" | "ne" | "ni" | "nn" | "sba" | "sc" | "seah" | "st" | "s")[])*: List of column names or column aliases used to sort the response. +** *`time` (Optional, Enum("nanos" | "micros" | "ms" | "s" | "m" | "h" | "d"))*: The unit used to display time values. + +[discrete] +==== ml_jobs +Get anomaly detection jobs. + +Get configuration and usage information for anomaly detection jobs. +This API returns a maximum of 10,000 jobs. +If the Elasticsearch security features are enabled, you must have `monitor_ml`, +`monitor`, `manage_ml`, or `manage` cluster privileges to use this API. + +IMPORTANT: CAT APIs are only intended for human consumption using the Kibana +console or command line. They are not intended for use by applications. For +application consumption, use the get anomaly detection job statistics API. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-ml-jobs[Endpoint documentation] +[source,ts] +---- +client.cat.mlJobs({ ... 
}) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`job_id` (Optional, string)*: Identifier for the anomaly detection job. +** *`allow_no_match` (Optional, boolean)*: Specifies what to do when the request: + +* Contains wildcard expressions and there are no jobs that match. +* Contains the `_all` string or no identifiers and there are no matches. +* Contains wildcard expressions and there are only partial matches. + +If `true`, the API returns an empty jobs array when there are no matches and the subset of results when there +are partial matches. If `false`, the API returns a 404 status code when there are no matches or only partial +matches. +** *`bytes` (Optional, Enum("b" | "kb" | "mb" | "gb" | "tb" | "pb"))*: The unit used to display byte values. +** *`h` (Optional, Enum("assignment_explanation" | "buckets.count" | "buckets.time.exp_avg" | "buckets.time.exp_avg_hour" | "buckets.time.max" | "buckets.time.min" | "buckets.time.total" | "data.buckets" | "data.earliest_record" | "data.empty_buckets" | "data.input_bytes" | "data.input_fields" | "data.input_records" | "data.invalid_dates" | "data.last" | "data.last_empty_bucket" | "data.last_sparse_bucket" | "data.latest_record" | "data.missing_fields" | "data.out_of_order_timestamps" | "data.processed_fields" | "data.processed_records" | "data.sparse_buckets" | "forecasts.memory.avg" | "forecasts.memory.max" | "forecasts.memory.min" | "forecasts.memory.total" | "forecasts.records.avg" | "forecasts.records.max" | "forecasts.records.min" | "forecasts.records.total" | "forecasts.time.avg" | "forecasts.time.max" | "forecasts.time.min" | "forecasts.time.total" | "forecasts.total" | "id" | "model.bucket_allocation_failures" | "model.by_fields" | "model.bytes" | "model.bytes_exceeded" | "model.categorization_status" | "model.categorized_doc_count" | "model.dead_category_count" | "model.failed_category_count" | "model.frequent_category_count" | "model.log_time" | "model.memory_limit" | "model.memory_status" | "model.over_fields" | "model.partition_fields" | "model.rare_category_count" | "model.timestamp" | "model.total_category_count" | "node.address" | "node.ephemeral_id" | "node.id" | "node.name" | "opened_time" | "state") | Enum("assignment_explanation" | "buckets.count" | "buckets.time.exp_avg" | "buckets.time.exp_avg_hour" | "buckets.time.max" | "buckets.time.min" | "buckets.time.total" | "data.buckets" | "data.earliest_record" | "data.empty_buckets" | "data.input_bytes" | "data.input_fields" | "data.input_records" | "data.invalid_dates" | "data.last" | "data.last_empty_bucket" | "data.last_sparse_bucket" | "data.latest_record" | "data.missing_fields" | "data.out_of_order_timestamps" | "data.processed_fields" | "data.processed_records" | "data.sparse_buckets" | "forecasts.memory.avg" | "forecasts.memory.max" | "forecasts.memory.min" | "forecasts.memory.total" | "forecasts.records.avg" | "forecasts.records.max" | "forecasts.records.min" | "forecasts.records.total" | "forecasts.time.avg" | "forecasts.time.max" | "forecasts.time.min" | "forecasts.time.total" | "forecasts.total" | "id" | "model.bucket_allocation_failures" | "model.by_fields" | "model.bytes" | "model.bytes_exceeded" | "model.categorization_status" | "model.categorized_doc_count" | "model.dead_category_count" | "model.failed_category_count" | "model.frequent_category_count" | "model.log_time" | "model.memory_limit" | "model.memory_status" | "model.over_fields" | "model.partition_fields" | "model.rare_category_count" | "model.timestamp" | "model.total_category_count" 
| "node.address" | "node.ephemeral_id" | "node.id" | "node.name" | "opened_time" | "state")[])*: List of column names to display. +** *`s` (Optional, Enum("assignment_explanation" | "buckets.count" | "buckets.time.exp_avg" | "buckets.time.exp_avg_hour" | "buckets.time.max" | "buckets.time.min" | "buckets.time.total" | "data.buckets" | "data.earliest_record" | "data.empty_buckets" | "data.input_bytes" | "data.input_fields" | "data.input_records" | "data.invalid_dates" | "data.last" | "data.last_empty_bucket" | "data.last_sparse_bucket" | "data.latest_record" | "data.missing_fields" | "data.out_of_order_timestamps" | "data.processed_fields" | "data.processed_records" | "data.sparse_buckets" | "forecasts.memory.avg" | "forecasts.memory.max" | "forecasts.memory.min" | "forecasts.memory.total" | "forecasts.records.avg" | "forecasts.records.max" | "forecasts.records.min" | "forecasts.records.total" | "forecasts.time.avg" | "forecasts.time.max" | "forecasts.time.min" | "forecasts.time.total" | "forecasts.total" | "id" | "model.bucket_allocation_failures" | "model.by_fields" | "model.bytes" | "model.bytes_exceeded" | "model.categorization_status" | "model.categorized_doc_count" | "model.dead_category_count" | "model.failed_category_count" | "model.frequent_category_count" | "model.log_time" | "model.memory_limit" | "model.memory_status" | "model.over_fields" | "model.partition_fields" | "model.rare_category_count" | "model.timestamp" | "model.total_category_count" | "node.address" | "node.ephemeral_id" | "node.id" | "node.name" | "opened_time" | "state") | Enum("assignment_explanation" | "buckets.count" | "buckets.time.exp_avg" | "buckets.time.exp_avg_hour" | "buckets.time.max" | "buckets.time.min" | "buckets.time.total" | "data.buckets" | "data.earliest_record" | "data.empty_buckets" | "data.input_bytes" | "data.input_fields" | "data.input_records" | "data.invalid_dates" | "data.last" | "data.last_empty_bucket" | "data.last_sparse_bucket" | "data.latest_record" | "data.missing_fields" | "data.out_of_order_timestamps" | "data.processed_fields" | "data.processed_records" | "data.sparse_buckets" | "forecasts.memory.avg" | "forecasts.memory.max" | "forecasts.memory.min" | "forecasts.memory.total" | "forecasts.records.avg" | "forecasts.records.max" | "forecasts.records.min" | "forecasts.records.total" | "forecasts.time.avg" | "forecasts.time.max" | "forecasts.time.min" | "forecasts.time.total" | "forecasts.total" | "id" | "model.bucket_allocation_failures" | "model.by_fields" | "model.bytes" | "model.bytes_exceeded" | "model.categorization_status" | "model.categorized_doc_count" | "model.dead_category_count" | "model.failed_category_count" | "model.frequent_category_count" | "model.log_time" | "model.memory_limit" | "model.memory_status" | "model.over_fields" | "model.partition_fields" | "model.rare_category_count" | "model.timestamp" | "model.total_category_count" | "node.address" | "node.ephemeral_id" | "node.id" | "node.name" | "opened_time" | "state")[])*: List of column names or column aliases used to sort the response. +** *`time` (Optional, Enum("nanos" | "micros" | "ms" | "s" | "m" | "h" | "d"))*: The unit used to display time values. + +[discrete] +==== ml_trained_models +Get trained models. + +Get configuration and usage information about inference trained models. + +IMPORTANT: CAT APIs are only intended for human consumption using the Kibana +console or command line. They are not intended for use by applications. For +application consumption, use the get trained models statistics API. 
+ +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-ml-trained-models[Endpoint documentation] +[source,ts] +---- +client.cat.mlTrainedModels({ ... }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`model_id` (Optional, string)*: A unique identifier for the trained model. +** *`allow_no_match` (Optional, boolean)*: Specifies what to do when the request: contains wildcard expressions and there are no models that match; contains the `_all` string or no identifiers and there are no matches; contains wildcard expressions and there are only partial matches. +If `true`, the API returns an empty array when there are no matches and the subset of results when there are partial matches. +If `false`, the API returns a 404 status code when there are no matches or only partial matches. +** *`bytes` (Optional, Enum("b" | "kb" | "mb" | "gb" | "tb" | "pb"))*: The unit used to display byte values. +** *`h` (Optional, Enum("create_time" | "created_by" | "data_frame_analytics_id" | "description" | "heap_size" | "id" | "ingest.count" | "ingest.current" | "ingest.failed" | "ingest.pipelines" | "ingest.time" | "license" | "operations" | "version") | Enum("create_time" | "created_by" | "data_frame_analytics_id" | "description" | "heap_size" | "id" | "ingest.count" | "ingest.current" | "ingest.failed" | "ingest.pipelines" | "ingest.time" | "license" | "operations" | "version")[])*: A list of column names to display. +** *`s` (Optional, Enum("create_time" | "created_by" | "data_frame_analytics_id" | "description" | "heap_size" | "id" | "ingest.count" | "ingest.current" | "ingest.failed" | "ingest.pipelines" | "ingest.time" | "license" | "operations" | "version") | Enum("create_time" | "created_by" | "data_frame_analytics_id" | "description" | "heap_size" | "id" | "ingest.count" | "ingest.current" | "ingest.failed" | "ingest.pipelines" | "ingest.time" | "license" | "operations" | "version")[])*: A list of column names or aliases used to sort the response. +** *`from` (Optional, number)*: Skips the specified number of transforms. +** *`size` (Optional, number)*: The maximum number of transforms to display. +** *`time` (Optional, Enum("nanos" | "micros" | "ms" | "s" | "m" | "h" | "d"))*: Unit used to display time values. + +[discrete] +==== nodeattrs +Get node attribute information. + +Get information about custom node attributes. +IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the nodes info API. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-nodeattrs[Endpoint documentation] +[source,ts] +---- +client.cat.nodeattrs({ ... }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`h` (Optional, string | string[])*: List of columns to appear in the response. Supports simple wildcards. +** *`s` (Optional, string | string[])*: List of columns that determine how the table should be sorted. +Sorting defaults to ascending and can be changed by setting `:asc` +or `:desc` as a suffix to the column name. +** *`local` (Optional, boolean)*: If `true`, the request computes the list of selected nodes from the +local cluster state. If `false` the list of selected nodes are computed +from the cluster state of the master node. In both cases the coordinating +node will send requests for further information to each selected node. 
+** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. + +[discrete] +==== nodes +Get node information. + +Get information about the nodes in a cluster. +IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the nodes info API. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-nodes[Endpoint documentation] +[source,ts] +---- +client.cat.nodes({ ... }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`bytes` (Optional, Enum("b" | "kb" | "mb" | "gb" | "tb" | "pb"))*: The unit used to display byte values. +** *`full_id` (Optional, boolean | string)*: If `true`, return the full node ID. If `false`, return the shortened node ID. +** *`include_unloaded_segments` (Optional, boolean)*: If true, the response includes information from segments that are not loaded into memory. +** *`h` (Optional, string | string[])*: List of columns to appear in the response. Supports simple wildcards. +** *`s` (Optional, string | string[])*: List of columns that determine how the table should be sorted. +Sorting defaults to ascending and can be changed by setting `:asc` +or `:desc` as a suffix to the column name. +** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. +** *`time` (Optional, Enum("nanos" | "micros" | "ms" | "s" | "m" | "h" | "d"))*: Unit used to display time values. + +[discrete] +==== pending_tasks +Get pending task information. + +Get information about cluster-level changes that have not yet taken effect. +IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the pending cluster tasks API. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-pending-tasks[Endpoint documentation] +[source,ts] +---- +client.cat.pendingTasks({ ... }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`h` (Optional, string | string[])*: List of columns to appear in the response. Supports simple wildcards. +** *`s` (Optional, string | string[])*: List of columns that determine how the table should be sorted. +Sorting defaults to ascending and can be changed by setting `:asc` +or `:desc` as a suffix to the column name. +** *`local` (Optional, boolean)*: If `true`, the request computes the list of selected nodes from the +local cluster state. If `false` the list of selected nodes are computed +from the cluster state of the master node. In both cases the coordinating +node will send requests for further information to each selected node. +** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. +** *`time` (Optional, Enum("nanos" | "micros" | "ms" | "s" | "m" | "h" | "d"))*: Unit used to display time values. + +[discrete] +==== plugins +Get plugin information. + +Get a list of plugins running on each node of a cluster. +IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the nodes info API. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-plugins[Endpoint documentation] +[source,ts] +---- +client.cat.plugins({ ... 
}) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`h` (Optional, string | string[])*: List of columns to appear in the response. Supports simple wildcards. +** *`s` (Optional, string | string[])*: List of columns that determine how the table should be sorted. +Sorting defaults to ascending and can be changed by setting `:asc` +or `:desc` as a suffix to the column name. +** *`include_bootstrap` (Optional, boolean)*: Include bootstrap plugins in the response +** *`local` (Optional, boolean)*: If `true`, the request computes the list of selected nodes from the +local cluster state. If `false` the list of selected nodes are computed +from the cluster state of the master node. In both cases the coordinating +node will send requests for further information to each selected node. +** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. + +[discrete] +==== recovery +Get shard recovery information. + +Get information about ongoing and completed shard recoveries. +Shard recovery is the process of initializing a shard copy, such as restoring a primary shard from a snapshot or syncing a replica shard from a primary shard. When a shard recovery completes, the recovered shard is available for search and indexing. +For data streams, the API returns information about the stream’s backing indices. +IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the index recovery API. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-recovery[Endpoint documentation] +[source,ts] +---- +client.cat.recovery({ ... }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`index` (Optional, string | string[])*: A list of data streams, indices, and aliases used to limit the request. +Supports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`. +** *`active_only` (Optional, boolean)*: If `true`, the response only includes ongoing shard recoveries. +** *`bytes` (Optional, Enum("b" | "kb" | "mb" | "gb" | "tb" | "pb"))*: The unit used to display byte values. +** *`detailed` (Optional, boolean)*: If `true`, the response includes detailed information about shard recoveries. +** *`h` (Optional, string | string[])*: List of columns to appear in the response. Supports simple wildcards. +** *`s` (Optional, string | string[])*: List of columns that determine how the table should be sorted. +Sorting defaults to ascending and can be changed by setting `:asc` +or `:desc` as a suffix to the column name. +** *`time` (Optional, Enum("nanos" | "micros" | "ms" | "s" | "m" | "h" | "d"))*: Unit used to display time values. + +[discrete] +==== repositories +Get snapshot repository information. + +Get a list of snapshot repositories for a cluster. +IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the get snapshot repository API. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-repositories[Endpoint documentation] +[source,ts] +---- +client.cat.repositories({ ... }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`h` (Optional, string | string[])*: List of columns to appear in the response. Supports simple wildcards. 
+** *`s` (Optional, string | string[])*: List of columns that determine how the table should be sorted. +Sorting defaults to ascending and can be changed by setting `:asc` +or `:desc` as a suffix to the column name. +** *`local` (Optional, boolean)*: If `true`, the request computes the list of selected nodes from the +local cluster state. If `false` the list of selected nodes are computed +from the cluster state of the master node. In both cases the coordinating +node will send requests for further information to each selected node. +** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. + +[discrete] +==== segments +Get segment information. + +Get low-level information about the Lucene segments in index shards. +For data streams, the API returns information about the backing indices. +IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the index segments API. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-segments[Endpoint documentation] +[source,ts] +---- +client.cat.segments({ ... }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`index` (Optional, string | string[])*: A list of data streams, indices, and aliases used to limit the request. +Supports wildcards (`*`). +To target all data streams and indices, omit this parameter or use `*` or `_all`. +** *`bytes` (Optional, Enum("b" | "kb" | "mb" | "gb" | "tb" | "pb"))*: The unit used to display byte values. +** *`h` (Optional, string | string[])*: List of columns to appear in the response. Supports simple wildcards. +** *`s` (Optional, string | string[])*: List of columns that determine how the table should be sorted. +Sorting defaults to ascending and can be changed by setting `:asc` +or `:desc` as a suffix to the column name. +** *`local` (Optional, boolean)*: If `true`, the request computes the list of selected nodes from the +local cluster state. If `false` the list of selected nodes are computed +from the cluster state of the master node. In both cases the coordinating +node will send requests for further information to each selected node. +** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. + +[discrete] +==== shards +Get shard information. + +Get information about the shards in a cluster. +For data streams, the API returns information about the backing indices. +IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-shards[Endpoint documentation] +[source,ts] +---- +client.cat.shards({ ... }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`index` (Optional, string | string[])*: A list of data streams, indices, and aliases used to limit the request. +Supports wildcards (`*`). +To target all data streams and indices, omit this parameter or use `*` or `_all`. +** *`bytes` (Optional, Enum("b" | "kb" | "mb" | "gb" | "tb" | "pb"))*: The unit used to display byte values. +** *`h` (Optional, string | string[])*: List of columns to appear in the response. Supports simple wildcards. +** *`s` (Optional, string | string[])*: List of columns that determine how the table should be sorted. 
+Sorting defaults to ascending and can be changed by setting `:asc` +or `:desc` as a suffix to the column name. +** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. +** *`time` (Optional, Enum("nanos" | "micros" | "ms" | "s" | "m" | "h" | "d"))*: Unit used to display time values. + +[discrete] +==== snapshots +Get snapshot information. + +Get information about the snapshots stored in one or more repositories. +A snapshot is a backup of an index or running Elasticsearch cluster. +IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the get snapshot API. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-snapshots[Endpoint documentation] +[source,ts] +---- +client.cat.snapshots({ ... }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`repository` (Optional, string | string[])*: A list of snapshot repositories used to limit the request. +Accepts wildcard expressions. +`_all` returns all repositories. +If any repository fails during the request, Elasticsearch returns an error. +** *`ignore_unavailable` (Optional, boolean)*: If `true`, the response does not include information from unavailable snapshots. +** *`h` (Optional, string | string[])*: List of columns to appear in the response. Supports simple wildcards. +** *`s` (Optional, string | string[])*: List of columns that determine how the table should be sorted. +Sorting defaults to ascending and can be changed by setting `:asc` +or `:desc` as a suffix to the column name. +** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. +** *`time` (Optional, Enum("nanos" | "micros" | "ms" | "s" | "m" | "h" | "d"))*: Unit used to display time values. + +[discrete] +==== tasks +Get task information. + +Get information about tasks currently running in the cluster. +IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the task management API. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-tasks[Endpoint documentation] +[source,ts] +---- +client.cat.tasks({ ... }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`actions` (Optional, string[])*: The task action names, which are used to limit the response. +** *`detailed` (Optional, boolean)*: If `true`, the response includes detailed information about the running tasks. +** *`nodes` (Optional, string[])*: Unique node identifiers, which are used to limit the response. +** *`parent_task_id` (Optional, string)*: The parent task identifier, which is used to limit the response. +** *`h` (Optional, string | string[])*: List of columns to appear in the response. Supports simple wildcards. +** *`s` (Optional, string | string[])*: List of columns that determine how the table should be sorted. +Sorting defaults to ascending and can be changed by setting `:asc` +or `:desc` as a suffix to the column name. +** *`time` (Optional, Enum("nanos" | "micros" | "ms" | "s" | "m" | "h" | "d"))*: Unit used to display time values. +** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. +If no response is received before the timeout expires, the request fails and returns an error.
+** *`wait_for_completion` (Optional, boolean)*: If `true`, the request blocks until the task has completed. + +[discrete] +==== templates +Get index template information. + +Get information about the index templates in a cluster. +You can use index templates to apply index settings and field mappings to new indices at creation. +IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the get index template API. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-templates[Endpoint documentation] +[source,ts] +---- +client.cat.templates({ ... }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`name` (Optional, string)*: The name of the template to return. +Accepts wildcard expressions. If omitted, all templates are returned. +** *`h` (Optional, string | string[])*: List of columns to appear in the response. Supports simple wildcards. +** *`s` (Optional, string | string[])*: List of columns that determine how the table should be sorted. +Sorting defaults to ascending and can be changed by setting `:asc` +or `:desc` as a suffix to the column name. +** *`local` (Optional, boolean)*: If `true`, the request computes the list of selected nodes from the +local cluster state. If `false` the list of selected nodes are computed +from the cluster state of the master node. In both cases the coordinating +node will send requests for further information to each selected node. +** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. + +[discrete] +==== thread_pool +Get thread pool statistics. + +Get thread pool statistics for each node in a cluster. +Returned information includes all built-in thread pools and custom thread pools. +IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the nodes info API. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-thread-pool[Endpoint documentation] +[source,ts] +---- +client.cat.threadPool({ ... }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`thread_pool_patterns` (Optional, string | string[])*: A list of thread pool names used to limit the request. +Accepts wildcard expressions. +** *`h` (Optional, string | string[])*: List of columns to appear in the response. Supports simple wildcards. +** *`s` (Optional, string | string[])*: List of columns that determine how the table should be sorted. +Sorting defaults to ascending and can be changed by setting `:asc` +or `:desc` as a suffix to the column name. +** *`time` (Optional, Enum("nanos" | "micros" | "ms" | "s" | "m" | "h" | "d"))*: The unit used to display time values. +** *`local` (Optional, boolean)*: If `true`, the request computes the list of selected nodes from the +local cluster state. If `false` the list of selected nodes are computed +from the cluster state of the master node. In both cases the coordinating +node will send requests for further information to each selected node. +** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. + +[discrete] +==== transforms +Get transform information. + +Get configuration and usage information about transforms. + +CAT APIs are only intended for human consumption using the Kibana +console or command line. 
They are not intended for use by applications. For +application consumption, use the get transform statistics API. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-transforms[Endpoint documentation] +[source,ts] +---- +client.cat.transforms({ ... }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`transform_id` (Optional, string)*: A transform identifier or a wildcard expression. +If you do not specify one of these options, the API returns information for all transforms. +** *`allow_no_match` (Optional, boolean)*: Specifies what to do when the request: contains wildcard expressions and there are no transforms that match; contains the `_all` string or no identifiers and there are no matches; contains wildcard expressions and there are only partial matches. +If `true`, it returns an empty transforms array when there are no matches and the subset of results when there are partial matches. +If `false`, the request returns a 404 status code when there are no matches or only partial matches. +** *`from` (Optional, number)*: Skips the specified number of transforms. +** *`h` (Optional, Enum("changes_last_detection_time" | "checkpoint" | "checkpoint_duration_time_exp_avg" | "checkpoint_progress" | "create_time" | "delete_time" | "description" | "dest_index" | "documents_deleted" | "documents_indexed" | "docs_per_second" | "documents_processed" | "frequency" | "id" | "index_failure" | "index_time" | "index_total" | "indexed_documents_exp_avg" | "last_search_time" | "max_page_search_size" | "pages_processed" | "pipeline" | "processed_documents_exp_avg" | "processing_time" | "reason" | "search_failure" | "search_time" | "search_total" | "source_index" | "state" | "transform_type" | "trigger_count" | "version") | Enum("changes_last_detection_time" | "checkpoint" | "checkpoint_duration_time_exp_avg" | "checkpoint_progress" | "create_time" | "delete_time" | "description" | "dest_index" | "documents_deleted" | "documents_indexed" | "docs_per_second" | "documents_processed" | "frequency" | "id" | "index_failure" | "index_time" | "index_total" | "indexed_documents_exp_avg" | "last_search_time" | "max_page_search_size" | "pages_processed" | "pipeline" | "processed_documents_exp_avg" | "processing_time" | "reason" | "search_failure" | "search_time" | "search_total" | "source_index" | "state" | "transform_type" | "trigger_count" | "version")[])*: List of column names to display. 
+** *`s` (Optional, Enum("changes_last_detection_time" | "checkpoint" | "checkpoint_duration_time_exp_avg" | "checkpoint_progress" | "create_time" | "delete_time" | "description" | "dest_index" | "documents_deleted" | "documents_indexed" | "docs_per_second" | "documents_processed" | "frequency" | "id" | "index_failure" | "index_time" | "index_total" | "indexed_documents_exp_avg" | "last_search_time" | "max_page_search_size" | "pages_processed" | "pipeline" | "processed_documents_exp_avg" | "processing_time" | "reason" | "search_failure" | "search_time" | "search_total" | "source_index" | "state" | "transform_type" | "trigger_count" | "version") | Enum("changes_last_detection_time" | "checkpoint" | "checkpoint_duration_time_exp_avg" | "checkpoint_progress" | "create_time" | "delete_time" | "description" | "dest_index" | "documents_deleted" | "documents_indexed" | "docs_per_second" | "documents_processed" | "frequency" | "id" | "index_failure" | "index_time" | "index_total" | "indexed_documents_exp_avg" | "last_search_time" | "max_page_search_size" | "pages_processed" | "pipeline" | "processed_documents_exp_avg" | "processing_time" | "reason" | "search_failure" | "search_time" | "search_total" | "source_index" | "state" | "transform_type" | "trigger_count" | "version")[])*: List of column names or column aliases used to sort the response. +** *`time` (Optional, Enum("nanos" | "micros" | "ms" | "s" | "m" | "h" | "d"))*: The unit used to display time values. +** *`size` (Optional, number)*: The maximum number of transforms to obtain. + +[discrete] +=== ccr +[discrete] +==== delete_auto_follow_pattern +Delete auto-follow patterns. + +Delete a collection of cross-cluster replication auto-follow patterns. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-delete-auto-follow-pattern[Endpoint documentation] +[source,ts] +---- +client.ccr.deleteAutoFollowPattern({ name }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`name` (string)*: The auto-follow pattern collection to delete. +** *`master_timeout` (Optional, string | -1 | 0)*: The period to wait for a connection to the master node. +If the master node is not available before the timeout expires, the request fails and returns an error. +It can also be set to `-1` to indicate that the request should never timeout. + +[discrete] +==== follow +Create a follower. +Create a cross-cluster replication follower index that follows a specific leader index. +When the API returns, the follower index exists and cross-cluster replication starts replicating operations from the leader index to the follower index. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-follow[Endpoint documentation] +[source,ts] +---- +client.ccr.follow({ index, leader_index, remote_cluster }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`index` (string)*: The name of the follower index. +** *`leader_index` (string)*: The name of the index in the leader cluster to follow. +** *`remote_cluster` (string)*: The remote cluster containing the leader index. +** *`data_stream_name` (Optional, string)*: If the leader index is part of a data stream, the name to which the local data stream for the followed index should be renamed. +** *`max_outstanding_read_requests` (Optional, number)*: The maximum number of outstanding reads requests from the remote cluster. +** *`max_outstanding_write_requests` (Optional, number)*: The maximum number of outstanding write requests on the follower. 
+** *`max_read_request_operation_count` (Optional, number)*: The maximum number of operations to pull per read from the remote cluster. +** *`max_read_request_size` (Optional, number | string)*: The maximum size in bytes of per read of a batch of operations pulled from the remote cluster. +** *`max_retry_delay` (Optional, string | -1 | 0)*: The maximum time to wait before retrying an operation that failed exceptionally. An exponential backoff strategy is employed when +retrying. +** *`max_write_buffer_count` (Optional, number)*: The maximum number of operations that can be queued for writing. When this limit is reached, reads from the remote cluster will be +deferred until the number of queued operations goes below the limit. +** *`max_write_buffer_size` (Optional, number | string)*: The maximum total bytes of operations that can be queued for writing. When this limit is reached, reads from the remote cluster will +be deferred until the total bytes of queued operations goes below the limit. +** *`max_write_request_operation_count` (Optional, number)*: The maximum number of operations per bulk write request executed on the follower. +** *`max_write_request_size` (Optional, number | string)*: The maximum total bytes of operations per bulk write request executed on the follower. +** *`read_poll_timeout` (Optional, string | -1 | 0)*: The maximum time to wait for new operations on the remote cluster when the follower index is synchronized with the leader index. +When the timeout has elapsed, the poll for operations will return to the follower so that it can update some statistics. +Then the follower will immediately attempt to read from the leader again. +** *`settings` (Optional, { index, mode, routing_path, soft_deletes, sort, number_of_shards, number_of_replicas, number_of_routing_shards, check_on_startup, codec, routing_partition_size, load_fixed_bitset_filters_eagerly, hidden, auto_expand_replicas, merge, search, refresh_interval, max_result_window, max_inner_result_window, max_rescore_window, max_docvalue_fields_search, max_script_fields, max_ngram_diff, max_shingle_diff, blocks, max_refresh_listeners, analyze, highlight, max_terms_count, max_regex_length, routing, gc_deletes, default_pipeline, final_pipeline, lifecycle, provided_name, creation_date, creation_date_string, uuid, version, verified_before_close, format, max_slices_per_scroll, translog, query_string, priority, top_metrics_max_size, analysis, settings, time_series, queries, similarity, mapping, indexing.slowlog, indexing_pressure, store })*: Settings to override from the leader index. +** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. +** *`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))*: Specifies the number of shards to wait on being active before responding. This defaults to waiting on none of the shards to be +active. +A shard must be restored from the leader index before being active. Restoring a follower shard requires transferring all the +remote Lucene segment files to the follower index. + +[discrete] +==== follow_info +Get follower information. + +Get information about all cross-cluster replication follower indices. +For example, the results include follower index names, leader index names, replication options, and whether the follower indices are active or paused. 
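+
+For example, a call to retrieve follower information for a single index might look like this (a sketch only; the follower index name is hypothetical and the `client` instance is assumed to already be configured):
+[source,ts]
+----
+// Hypothetical follower index name; a comma-delimited list or pattern also works.
+const info = await client.ccr.followInfo({ index: 'follower-index' })
+console.log(info)
+----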
+ +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-follow-info[Endpoint documentation] +[source,ts] +---- +client.ccr.followInfo({ index }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`index` (string | string[])*: A comma-delimited list of follower index patterns. +** *`master_timeout` (Optional, string | -1 | 0)*: The period to wait for a connection to the master node. +If the master node is not available before the timeout expires, the request fails and returns an error. +It can also be set to `-1` to indicate that the request should never timeout. + +[discrete] +==== follow_stats +Get follower stats. + +Get cross-cluster replication follower stats. +The API returns shard-level stats about the "following tasks" associated with each shard for the specified indices. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-follow-stats[Endpoint documentation] +[source,ts] +---- +client.ccr.followStats({ index }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`index` (string | string[])*: A comma-delimited list of index patterns. +** *`timeout` (Optional, string | -1 | 0)*: The period to wait for a response. +If no response is received before the timeout expires, the request fails and returns an error. + +[discrete] +==== forget_follower +Forget a follower. +Remove the cross-cluster replication follower retention leases from the leader. + +A following index takes out retention leases on its leader index. +These leases are used to increase the likelihood that the shards of the leader index retain the history of operations that the shards of the following index need to run replication. +When a follower index is converted to a regular index by the unfollow API (either by directly calling the API or by index lifecycle management tasks), these leases are removed. +However, removal of the leases can fail, for example when the remote cluster containing the leader index is unavailable. +While the leases will eventually expire on their own, their extended existence can cause the leader index to hold more history than necessary and prevent index lifecycle management from performing some operations on the leader index. +This API exists to enable manually removing the leases when the unfollow API is unable to do so. + +NOTE: This API does not stop replication by a following index. If you use this API with a follower index that is still actively following, the following index will add back retention leases on the leader. +The only purpose of this API is to handle the case of failure to remove the following retention leases after the unfollow API is invoked. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-forget-follower[Endpoint documentation] +[source,ts] +---- +client.ccr.forgetFollower({ index }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`index` (string)*: the name of the leader index for which specified follower retention leases should be removed +** *`follower_cluster` (Optional, string)* +** *`follower_index` (Optional, string)* +** *`follower_index_uuid` (Optional, string)* +** *`leader_remote_cluster` (Optional, string)* +** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. + +[discrete] +==== get_auto_follow_pattern +Get auto-follow patterns. + +Get cross-cluster replication auto-follow patterns. 
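+
+A minimal sketch, assuming the client is already configured (the pattern name is hypothetical; omit `name` to list all collections):
+[source,ts]
+----
+// Fetch a single auto-follow pattern collection by name.
+const patterns = await client.ccr.getAutoFollowPattern({ name: 'my-auto-follow-pattern' })
+console.log(patterns)
+----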
+ +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-get-auto-follow-pattern-1[Endpoint documentation] +[source,ts] +---- +client.ccr.getAutoFollowPattern({ ... }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`name` (Optional, string)*: The auto-follow pattern collection that you want to retrieve. +If you do not specify a name, the API returns information for all collections. +** *`master_timeout` (Optional, string | -1 | 0)*: The period to wait for a connection to the master node. +If the master node is not available before the timeout expires, the request fails and returns an error. +It can also be set to `-1` to indicate that the request should never timeout. + +[discrete] +==== pause_auto_follow_pattern +Pause an auto-follow pattern. + +Pause a cross-cluster replication auto-follow pattern. +When the API returns, the auto-follow pattern is inactive. +New indices that are created on the remote cluster and match the auto-follow patterns are ignored. + +You can resume auto-following with the resume auto-follow pattern API. +When it resumes, the auto-follow pattern is active again and automatically configures follower indices for newly created indices on the remote cluster that match its patterns. +Remote indices that were created while the pattern was paused will also be followed, unless they have been deleted or closed in the interim. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-pause-auto-follow-pattern[Endpoint documentation] +[source,ts] +---- +client.ccr.pauseAutoFollowPattern({ name }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`name` (string)*: The name of the auto-follow pattern to pause. +** *`master_timeout` (Optional, string | -1 | 0)*: The period to wait for a connection to the master node. +If the master node is not available before the timeout expires, the request fails and returns an error. +It can also be set to `-1` to indicate that the request should never timeout. + +[discrete] +==== pause_follow +Pause a follower. + +Pause a cross-cluster replication follower index. +The follower index will not fetch any additional operations from the leader index. +You can resume following with the resume follower API. +You can pause and resume a follower index to change the configuration of the following task. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-pause-follow[Endpoint documentation] +[source,ts] +---- +client.ccr.pauseFollow({ index }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`index` (string)*: The name of the follower index. +** *`master_timeout` (Optional, string | -1 | 0)*: The period to wait for a connection to the master node. +If the master node is not available before the timeout expires, the request fails and returns an error. +It can also be set to `-1` to indicate that the request should never timeout. + +[discrete] +==== put_auto_follow_pattern +Create or update auto-follow patterns. +Create a collection of cross-cluster replication auto-follow patterns for a remote cluster. +Newly created indices on the remote cluster that match any of the patterns are automatically configured as follower indices. +Indices on the remote cluster that were created before the auto-follow pattern was created will not be auto-followed even if they match the pattern. + +This API can also be used to update auto-follow patterns. 
+NOTE: Follower indices that were configured automatically before updating an auto-follow pattern will remain unchanged even if they do not match against the new patterns.
+
+https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-put-auto-follow-pattern[Endpoint documentation]
+[source,ts]
+----
+client.ccr.putAutoFollowPattern({ name, remote_cluster })
+----
+
+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`name` (string)*: The name of the collection of auto-follow patterns.
+** *`remote_cluster` (string)*: The remote cluster containing the leader indices to match against.
+** *`follow_index_pattern` (Optional, string)*: The name of the follower index. The template {{leader_index}} can be used to derive the name of the follower index from the name of the leader index. When following a data stream, use {{leader_index}}; CCR does not support changes to the names of a follower data stream’s backing indices.
+** *`leader_index_patterns` (Optional, string[])*: An array of simple index patterns to match against indices in the remote cluster specified by the remote_cluster field.
+** *`leader_index_exclusion_patterns` (Optional, string[])*: An array of simple index patterns that can be used to exclude indices from being auto-followed. Indices in the remote cluster whose names match one or more leader_index_patterns and one or more leader_index_exclusion_patterns won’t be followed.
+** *`max_outstanding_read_requests` (Optional, number)*: The maximum number of outstanding read requests from the remote cluster.
+** *`settings` (Optional, Record)*: Settings to override from the leader index. Note that certain settings cannot be overridden (e.g., index.number_of_shards).
+** *`max_outstanding_write_requests` (Optional, number)*: The maximum number of outstanding write requests on the follower.
+** *`read_poll_timeout` (Optional, string | -1 | 0)*: The maximum time to wait for new operations on the remote cluster when the follower index is synchronized with the leader index. When the timeout has elapsed, the poll for operations will return to the follower so that it can update some statistics. Then the follower will immediately attempt to read from the leader again.
+** *`max_read_request_operation_count` (Optional, number)*: The maximum number of operations to pull per read from the remote cluster.
+** *`max_read_request_size` (Optional, number | string)*: The maximum size in bytes per read of a batch of operations pulled from the remote cluster.
+** *`max_retry_delay` (Optional, string | -1 | 0)*: The maximum time to wait before retrying an operation that failed exceptionally. An exponential backoff strategy is employed when retrying.
+** *`max_write_buffer_count` (Optional, number)*: The maximum number of operations that can be queued for writing. When this limit is reached, reads from the remote cluster will be deferred until the number of queued operations goes below the limit.
+** *`max_write_buffer_size` (Optional, number | string)*: The maximum total bytes of operations that can be queued for writing. When this limit is reached, reads from the remote cluster will be deferred until the total bytes of queued operations goes below the limit.
+** *`max_write_request_operation_count` (Optional, number)*: The maximum number of operations per bulk write request executed on the follower.
+** *`max_write_request_size` (Optional, number | string)*: The maximum total bytes of operations per bulk write request executed on the follower.
+** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. + +[discrete] +==== resume_auto_follow_pattern +Resume an auto-follow pattern. + +Resume a cross-cluster replication auto-follow pattern that was paused. +The auto-follow pattern will resume configuring following indices for newly created indices that match its patterns on the remote cluster. +Remote indices created while the pattern was paused will also be followed unless they have been deleted or closed in the interim. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-resume-auto-follow-pattern[Endpoint documentation] +[source,ts] +---- +client.ccr.resumeAutoFollowPattern({ name }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`name` (string)*: The name of the auto-follow pattern to resume. +** *`master_timeout` (Optional, string | -1 | 0)*: The period to wait for a connection to the master node. +If the master node is not available before the timeout expires, the request fails and returns an error. +It can also be set to `-1` to indicate that the request should never timeout. + +[discrete] +==== resume_follow +Resume a follower. +Resume a cross-cluster replication follower index that was paused. +The follower index could have been paused with the pause follower API. +Alternatively it could be paused due to replication that cannot be retried due to failures during following tasks. +When this API returns, the follower index will resume fetching operations from the leader index. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-resume-follow[Endpoint documentation] +[source,ts] +---- +client.ccr.resumeFollow({ index }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`index` (string)*: The name of the follow index to resume following. +** *`max_outstanding_read_requests` (Optional, number)* +** *`max_outstanding_write_requests` (Optional, number)* +** *`max_read_request_operation_count` (Optional, number)* +** *`max_read_request_size` (Optional, string)* +** *`max_retry_delay` (Optional, string | -1 | 0)* +** *`max_write_buffer_count` (Optional, number)* +** *`max_write_buffer_size` (Optional, string)* +** *`max_write_request_operation_count` (Optional, number)* +** *`max_write_request_size` (Optional, string)* +** *`read_poll_timeout` (Optional, string | -1 | 0)* +** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. + +[discrete] +==== stats +Get cross-cluster replication stats. + +This API returns stats about auto-following and the same shard-level stats as the get follower stats API. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-stats[Endpoint documentation] +[source,ts] +---- +client.ccr.stats({ ... }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`master_timeout` (Optional, string | -1 | 0)*: The period to wait for a connection to the master node. +If the master node is not available before the timeout expires, the request fails and returns an error. +It can also be set to `-1` to indicate that the request should never timeout. +** *`timeout` (Optional, string | -1 | 0)*: The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. + +[discrete] +==== unfollow +Unfollow an index. + +Convert a cross-cluster replication follower index to a regular index. 
+The API stops the following task associated with a follower index and removes index metadata and settings associated with cross-cluster replication. +The follower index must be paused and closed before you call the unfollow API. + +> info +> Currently cross-cluster replication does not support converting an existing regular index to a follower index. Converting a follower index to a regular index is an irreversible operation. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-unfollow[Endpoint documentation] +[source,ts] +---- +client.ccr.unfollow({ index }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`index` (string)*: The name of the follower index. +** *`master_timeout` (Optional, string | -1 | 0)*: The period to wait for a connection to the master node. +If the master node is not available before the timeout expires, the request fails and returns an error. +It can also be set to `-1` to indicate that the request should never timeout. + +[discrete] +=== cluster +[discrete] +==== allocation_explain +Explain the shard allocations. +Get explanations for shard allocations in the cluster. +For unassigned shards, it provides an explanation for why the shard is unassigned. +For assigned shards, it provides an explanation for why the shard is remaining on its current node and has not moved or rebalanced to another node. +This API can be very useful when attempting to diagnose why a shard is unassigned or why a shard continues to remain on its current node when you might expect otherwise. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-allocation-explain[Endpoint documentation] +[source,ts] +---- +client.cluster.allocationExplain({ ... }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`current_node` (Optional, string)*: Specifies the node ID or the name of the node to only explain a shard that is currently located on the specified node. +** *`index` (Optional, string)*: Specifies the name of the index that you would like an explanation for. +** *`primary` (Optional, boolean)*: If true, returns explanation for the primary shard for the given shard ID. +** *`shard` (Optional, number)*: Specifies the ID of the shard that you would like an explanation for. +** *`include_disk_info` (Optional, boolean)*: If true, returns information about disk usage and shard sizes. +** *`include_yes_decisions` (Optional, boolean)*: If true, returns YES decisions in explanation. +** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. + +[discrete] +==== delete_component_template +Delete component templates. +Component templates are building blocks for constructing index templates that specify index mappings, settings, and aliases. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-put-component-template[Endpoint documentation] +[source,ts] +---- +client.cluster.deleteComponentTemplate({ name }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`name` (string | string[])*: List or wildcard expression of component template names used to limit the request. +** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. +** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. +If no response is received before the timeout expires, the request fails and returns an error. 
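+
+For example (a sketch; the template name is hypothetical):
+[source,ts]
+----
+// Delete a single component template by name; wildcard expressions are also accepted.
+await client.cluster.deleteComponentTemplate({ name: 'my-component-template' })
+----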
+ +[discrete] +==== delete_voting_config_exclusions +Clear cluster voting config exclusions. +Remove master-eligible nodes from the voting configuration exclusion list. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-post-voting-config-exclusions[Endpoint documentation] +[source,ts] +---- +client.cluster.deleteVotingConfigExclusions({ ... }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. +** *`wait_for_removal` (Optional, boolean)*: Specifies whether to wait for all excluded nodes to be removed from the +cluster before clearing the voting configuration exclusions list. +Defaults to true, meaning that all excluded nodes must be removed from +the cluster before this API takes any action. If set to false then the +voting configuration exclusions list is cleared even if some excluded +nodes are still in the cluster. + +[discrete] +==== exists_component_template +Check component templates. +Returns information about whether a particular component template exists. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-put-component-template[Endpoint documentation] +[source,ts] +---- +client.cluster.existsComponentTemplate({ name }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`name` (string | string[])*: List of component template names used to limit the request. +Wildcard (*) expressions are supported. +** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. If no response is +received before the timeout expires, the request fails and returns an +error. +** *`local` (Optional, boolean)*: If true, the request retrieves information from the local node only. +Defaults to false, which means information is retrieved from the master node. + +[discrete] +==== get_component_template +Get component templates. +Get information about component templates. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-put-component-template[Endpoint documentation] +[source,ts] +---- +client.cluster.getComponentTemplate({ ... }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`name` (Optional, string)*: List of component template names used to limit the request. +Wildcard (`*`) expressions are supported. +** *`flat_settings` (Optional, boolean)*: If `true`, returns settings in flat format. +** *`include_defaults` (Optional, boolean)*: Return all default configurations for the component template (default: false) +** *`local` (Optional, boolean)*: If `true`, the request retrieves information from the local node only. +If `false`, information is retrieved from the master node. +** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. + +[discrete] +==== get_settings +Get cluster-wide settings. +By default, it returns only settings that have been explicitly defined. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-get-settings[Endpoint documentation] +[source,ts] +---- +client.cluster.getSettings({ ... }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`flat_settings` (Optional, boolean)*: If `true`, returns settings in flat format. +** *`include_defaults` (Optional, boolean)*: If `true`, returns default cluster settings from the local node. 
+** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node.
+If no response is received before the timeout expires, the request fails and returns an error.
+** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response.
+If no response is received before the timeout expires, the request fails and returns an error.
+
+[discrete]
+==== health
+Get the cluster health status.
+
+You can also use the API to get the health status of only specified data streams and indices.
+For data streams, the API retrieves the health status of the stream’s backing indices.
+
+The cluster health status is: green, yellow or red.
+On the shard level, a red status indicates that the specific shard is not allocated in the cluster. Yellow means that the primary shard is allocated but replicas are not. Green means that all shards are allocated.
+The index level status is controlled by the worst shard status.
+
+One of the main benefits of the API is the ability to wait until the cluster reaches a certain high watermark health level.
+The cluster status is controlled by the worst index status.
+
+https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-health[Endpoint documentation]
+[source,ts]
+----
+client.cluster.health({ ... })
+----
+
+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`index` (Optional, string | string[])*: List of data streams, indices, and index aliases used to limit the request. Wildcard expressions (`*`) are supported. To target all data streams and indices in a cluster, omit this parameter or use _all or `*`.
+** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Whether to expand wildcard expression to concrete indices that are open, closed or both.
+** *`level` (Optional, Enum("cluster" | "indices" | "shards"))*: Can be one of cluster, indices or shards. Controls the details level of the health information returned.
+** *`local` (Optional, boolean)*: If true, the request retrieves information from the local node only. Defaults to false, which means information is retrieved from the master node.
+** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.
+** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.
+** *`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))*: A number controlling how many active shards to wait for, `all` to wait for all shards in the cluster to be active, or `0` to not wait.
+** *`wait_for_events` (Optional, Enum("immediate" | "urgent" | "high" | "normal" | "low" | "languid"))*: Can be one of immediate, urgent, high, normal, low, languid. Wait until all currently queued events with the given priority are processed.
+** *`wait_for_nodes` (Optional, string | number)*: The request waits until the specified number N of nodes is available. It also accepts >=N, <=N, >N and <N.
+** *`wait_for_status` (Optional, Enum("green" | "yellow" | "red"))*: Wait until the cluster status changes to the provided status or better, that is: green > yellow > red. By default, will not wait for any status.
+
+[discrete]
+==== info
+Get cluster info.
+Returns basic information about the cluster.
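+
+For example (a sketch; `_all` is one of the supported targets documented below):
+[source,ts]
+----
+// Retrieve info for every target: HTTP, ingest, thread_pool, and script.
+const info = await client.cluster.info({ target: '_all' })
+console.log(info)
+----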
+ +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-info[Endpoint documentation] +[source,ts] +---- +client.cluster.info({ target }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`target` (Enum("_all" | "http" | "ingest" | "thread_pool" | "script") | Enum("_all" | "http" | "ingest" | "thread_pool" | "script")[])*: Limits the information returned to the specific target. Supports a list, such as http,ingest. + +[discrete] +==== pending_tasks +Get the pending cluster tasks. +Get information about cluster-level changes (such as create index, update mapping, allocate or fail shard) that have not yet taken effect. + +NOTE: This API returns a list of any pending updates to the cluster state. +These are distinct from the tasks reported by the task management API which include periodic tasks and tasks initiated by the user, such as node stats, search queries, or create index requests. +However, if a user-initiated task such as a create index command causes a cluster state update, the activity of this task might be reported by both task api and pending cluster tasks API. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-pending-tasks[Endpoint documentation] +[source,ts] +---- +client.cluster.pendingTasks({ ... }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`local` (Optional, boolean)*: If `true`, the request retrieves information from the local node only. +If `false`, information is retrieved from the master node. +** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. + +[discrete] +==== post_voting_config_exclusions +Update voting configuration exclusions. +Update the cluster voting config exclusions by node IDs or node names. +By default, if there are more than three master-eligible nodes in the cluster and you remove fewer than half of the master-eligible nodes in the cluster at once, the voting configuration automatically shrinks. +If you want to shrink the voting configuration to contain fewer than three nodes or to remove half or more of the master-eligible nodes in the cluster at once, use this API to remove departing nodes from the voting configuration manually. +The API adds an entry for each specified node to the cluster’s voting configuration exclusions list. +It then waits until the cluster has reconfigured its voting configuration to exclude the specified nodes. + +Clusters should have no voting configuration exclusions in normal operation. +Once the excluded nodes have stopped, clear the voting configuration exclusions with `DELETE /_cluster/voting_config_exclusions`. +This API waits for the nodes to be fully removed from the cluster before it returns. +If your cluster has voting configuration exclusions for nodes that you no longer intend to remove, use `DELETE /_cluster/voting_config_exclusions?wait_for_removal=false` to clear the voting configuration exclusions without waiting for the nodes to leave the cluster. + +A response to `POST /_cluster/voting_config_exclusions` with an HTTP status code of 200 OK guarantees that the node has been removed from the voting configuration and will not be reinstated until the voting configuration exclusions are cleared by calling `DELETE /_cluster/voting_config_exclusions`. 
+If the call to `POST /_cluster/voting_config_exclusions` fails or returns a response with an HTTP status code other than 200 OK then the node may not have been removed from the voting configuration. +In that case, you may safely retry the call. + +NOTE: Voting exclusions are required only when you remove at least half of the master-eligible nodes from a cluster in a short time period. +They are not required when removing master-ineligible nodes or when removing fewer than half of the master-eligible nodes. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-post-voting-config-exclusions[Endpoint documentation] +[source,ts] +---- +client.cluster.postVotingConfigExclusions({ ... }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`node_names` (Optional, string | string[])*: A list of the names of the nodes to exclude from the +voting configuration. If specified, you may not also specify node_ids. +** *`node_ids` (Optional, string | string[])*: A list of the persistent ids of the nodes to exclude +from the voting configuration. If specified, you may not also specify node_names. +** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. +** *`timeout` (Optional, string | -1 | 0)*: When adding a voting configuration exclusion, the API waits for the +specified nodes to be excluded from the voting configuration before +returning. If the timeout expires before the appropriate condition +is satisfied, the request fails and returns an error. + +[discrete] +==== put_component_template +Create or update a component template. +Component templates are building blocks for constructing index templates that specify index mappings, settings, and aliases. + +An index template can be composed of multiple component templates. +To use a component template, specify it in an index template’s `composed_of` list. +Component templates are only applied to new data streams and indices as part of a matching index template. + +Settings and mappings specified directly in the index template or the create index request override any settings or mappings specified in a component template. + +Component templates are only used during index creation. +For data streams, this includes data stream creation and the creation of a stream’s backing indices. +Changes to component templates do not affect existing indices, including a stream’s backing indices. + +You can use C-style `/* *\/` block comments in component templates. +You can include comments anywhere in the request body except before the opening curly bracket. + +**Applying component templates** + +You cannot directly apply a component template to a data stream or index. +To be applied, a component template must be included in an index template's `composed_of` list. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-put-component-template[Endpoint documentation] +[source,ts] +---- +client.cluster.putComponentTemplate({ name, template }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`name` (string)*: Name of the component template to create. +Elasticsearch includes the following built-in component templates: `logs-mappings`; `logs-settings`; `metrics-mappings`; `metrics-settings`;`synthetics-mapping`; `synthetics-settings`. +Elastic Agent uses these templates to configure backing indices for its data streams. 
+If you use Elastic Agent and want to overwrite one of these templates, set the `version` for your replacement template higher than the current version. +If you don’t use Elastic Agent and want to disable all built-in component and index templates, set `stack.templates.enabled` to `false` using the cluster update settings API. +** *`template` ({ aliases, mappings, settings, defaults, data_stream, lifecycle })*: The template to be applied which includes mappings, settings, or aliases configuration. +** *`version` (Optional, number)*: Version number used to manage component templates externally. +This number isn't automatically generated or incremented by Elasticsearch. +To unset a version, replace the template without specifying a version. +** *`_meta` (Optional, Record)*: Optional user metadata about the component template. +It may have any contents. This map is not automatically generated by Elasticsearch. +This information is stored in the cluster state, so keeping it short is preferable. +To unset `_meta`, replace the template without specifying this information. +** *`deprecated` (Optional, boolean)*: Marks this index template as deprecated. When creating or updating a non-deprecated index template +that uses deprecated components, Elasticsearch will emit a deprecation warning. +** *`create` (Optional, boolean)*: If `true`, this request cannot replace or update existing component templates. +** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. + +[discrete] +==== put_settings +Update the cluster settings. + +Configure and update dynamic settings on a running cluster. +You can also configure dynamic settings locally on an unstarted or shut down node in `elasticsearch.yml`. + +Updates made with this API can be persistent, which apply across cluster restarts, or transient, which reset after a cluster restart. +You can also reset transient or persistent settings by assigning them a null value. + +If you configure the same setting using multiple methods, Elasticsearch applies the settings in following order of precedence: 1) Transient setting; 2) Persistent setting; 3) `elasticsearch.yml` setting; 4) Default setting value. +For example, you can apply a transient setting to override a persistent setting or `elasticsearch.yml` setting. +However, a change to an `elasticsearch.yml` setting will not override a defined transient or persistent setting. + +TIP: In Elastic Cloud, use the user settings feature to configure all cluster settings. This method automatically rejects unsafe settings that could break your cluster. +If you run Elasticsearch on your own hardware, use this API to configure dynamic cluster settings. +Only use `elasticsearch.yml` for static cluster settings and node settings. +The API doesn’t require a restart and ensures a setting’s value is the same on all nodes. + +WARNING: Transient cluster settings are no longer recommended. Use persistent cluster settings instead. +If a cluster becomes unstable, transient settings can clear unexpectedly, resulting in a potentially undesired cluster configuration. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-put-settings[Endpoint documentation] +[source,ts] +---- +client.cluster.putSettings({ ... 
}) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`persistent` (Optional, Record)* +** *`transient` (Optional, Record)* +** *`flat_settings` (Optional, boolean)*: Return settings in flat format (default: false) +** *`master_timeout` (Optional, string | -1 | 0)*: Explicit operation timeout for connection to master node +** *`timeout` (Optional, string | -1 | 0)*: Explicit operation timeout + +[discrete] +==== remote_info +Get remote cluster information. + +Get information about configured remote clusters. +The API returns connection and endpoint information keyed by the configured remote cluster alias. + +> info +> This API returns information that reflects current state on the local cluster. +> The `connected` field does not necessarily reflect whether a remote cluster is down or unavailable, only whether there is currently an open connection to it. +> Elasticsearch does not spontaneously try to reconnect to a disconnected remote cluster. +> To trigger a reconnection, attempt a cross-cluster search, ES|QL cross-cluster search, or try the [resolve cluster endpoint](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-resolve-cluster). + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-remote-info[Endpoint documentation] +[source,ts] +---- +client.cluster.remoteInfo() +---- + + +[discrete] +==== reroute +Reroute the cluster. +Manually change the allocation of individual shards in the cluster. +For example, a shard can be moved from one node to another explicitly, an allocation can be canceled, and an unassigned shard can be explicitly allocated to a specific node. + +It is important to note that after processing any reroute commands Elasticsearch will perform rebalancing as normal (respecting the values of settings such as `cluster.routing.rebalance.enable`) in order to remain in a balanced state. +For example, if the requested allocation includes moving a shard from node1 to node2 then this may cause a shard to be moved from node2 back to node1 to even things out. + +The cluster can be set to disable allocations using the `cluster.routing.allocation.enable` setting. +If allocations are disabled then the only allocations that will be performed are explicit ones given using the reroute command, and consequent allocations due to rebalancing. + +The cluster will attempt to allocate a shard a maximum of `index.allocation.max_retries` times in a row (defaults to `5`), before giving up and leaving the shard unallocated. +This scenario can be caused by structural problems such as having an analyzer which refers to a stopwords file which doesn’t exist on all nodes. + +Once the problem has been corrected, allocation can be manually retried by calling the reroute API with the `?retry_failed` URI query parameter, which will attempt a single retry round for these shards. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-reroute[Endpoint documentation] +[source,ts] +---- +client.cluster.reroute({ ... }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`commands` (Optional, { cancel, move, allocate_replica, allocate_stale_primary, allocate_empty_primary }[])*: Defines the commands to perform. +** *`dry_run` (Optional, boolean)*: If true, then the request simulates the operation. 
+It will calculate the result of applying the commands to the current cluster state and return the resulting cluster state after the commands (and rebalancing) have been applied; it will not actually perform the requested changes. +** *`explain` (Optional, boolean)*: If true, then the response contains an explanation of why the commands can or cannot run. +** *`metric` (Optional, string | string[])*: Limits the information returned to the specified metrics. +** *`retry_failed` (Optional, boolean)*: If true, then retries allocation of shards that are blocked due to too many subsequent allocation failures. +** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. +** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. + +[discrete] +==== state +Get the cluster state. +Get comprehensive information about the state of the cluster. + +The cluster state is an internal data structure which keeps track of a variety of information needed by every node, including the identity and attributes of the other nodes in the cluster; cluster-wide settings; index metadata, including the mapping and settings for each index; the location and status of every shard copy in the cluster. + +The elected master node ensures that every node in the cluster has a copy of the same cluster state. +This API lets you retrieve a representation of this internal state for debugging or diagnostic purposes. +You may need to consult the Elasticsearch source code to determine the precise meaning of the response. + +By default the API will route requests to the elected master node since this node is the authoritative source of cluster states. +You can also retrieve the cluster state held on the node handling the API request by adding the `?local=true` query parameter. + +Elasticsearch may need to expend significant effort to compute a response to this API in larger clusters, and the response may comprise a very large quantity of data. +If you use this API repeatedly, your cluster may become unstable. + +WARNING: The response is a representation of an internal data structure. +Its format is not subject to the same compatibility guarantees as other more stable APIs and may change from version to version. +Do not query this API using external monitoring tools. +Instead, obtain the information you require using other more stable cluster APIs. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-state[Endpoint documentation] +[source,ts] +---- +client.cluster.state({ ... }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`metric` (Optional, string | string[])*: Limit the information returned to the specified metrics +** *`index` (Optional, string | string[])*: A list of index names; use `_all` or empty string to perform the operation on all indices +** *`allow_no_indices` (Optional, boolean)*: Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified) +** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Whether to expand wildcard expression to concrete indices that are open, closed or both. 
+** *`flat_settings` (Optional, boolean)*: Return settings in flat format (default: false) +** *`ignore_unavailable` (Optional, boolean)*: Whether specified concrete indices should be ignored when unavailable (missing or closed) +** *`local` (Optional, boolean)*: Return local information, do not retrieve the state from master node (default: false) +** *`master_timeout` (Optional, string | -1 | 0)*: Specify timeout for connection to master +** *`wait_for_metadata_version` (Optional, number)*: Wait for the metadata version to be equal or greater than the specified metadata version +** *`wait_for_timeout` (Optional, string | -1 | 0)*: The maximum time to wait for wait_for_metadata_version before timing out + +[discrete] +==== stats +Get cluster statistics. +Get basic index metrics (shard numbers, store size, memory usage) and information about the current nodes that form the cluster (number, roles, os, jvm versions, memory usage, cpu and installed plugins). + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-stats[Endpoint documentation] +[source,ts] +---- +client.cluster.stats({ ... }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`node_id` (Optional, string | string[])*: List of node filters used to limit returned information. Defaults to all nodes in the cluster. +** *`include_remotes` (Optional, boolean)*: Include remote cluster data into the response +** *`timeout` (Optional, string | -1 | 0)*: Period to wait for each node to respond. +If a node does not respond before its timeout expires, the response does not include its stats. +However, timed out nodes are included in the response’s `_nodes.failed` property. Defaults to no timeout. + +[discrete] +=== connector +[discrete] +==== check_in +Check in a connector. + +Update the `last_seen` field in the connector and set it to the current timestamp. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-check-in[Endpoint documentation] +[source,ts] +---- +client.connector.checkIn({ connector_id }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`connector_id` (string)*: The unique identifier of the connector to be checked in + +[discrete] +==== delete +Delete a connector. + +Removes a connector and associated sync jobs. +This is a destructive action that is not recoverable. +NOTE: This action doesn’t delete any API keys, ingest pipelines, or data indices associated with the connector. +These need to be removed manually. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-delete[Endpoint documentation] +[source,ts] +---- +client.connector.delete({ connector_id }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`connector_id` (string)*: The unique identifier of the connector to be deleted +** *`delete_sync_jobs` (Optional, boolean)*: A flag indicating if associated sync jobs should be also removed. Defaults to false. +** *`hard` (Optional, boolean)*: A flag indicating if the connector should be hard deleted. + +[discrete] +==== get +Get a connector. + +Get the details about a connector. 
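+
+For example (a sketch; the connector ID is hypothetical):
+[source,ts]
+----
+// Fetch a single connector document by its ID.
+const connector = await client.connector.get({ connector_id: 'my-connector-id' })
+console.log(connector)
+----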
+ +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-get[Endpoint documentation] +[source,ts] +---- +client.connector.get({ connector_id }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`connector_id` (string)*: The unique identifier of the connector +** *`include_deleted` (Optional, boolean)*: A flag to indicate if the desired connector should be fetched, even if it was soft-deleted. + +[discrete] +==== list +Get all connectors. + +Get information about all connectors. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-list[Endpoint documentation] +[source,ts] +---- +client.connector.list({ ... }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`from` (Optional, number)*: Starting offset (default: 0) +** *`size` (Optional, number)*: Specifies a max number of results to get +** *`index_name` (Optional, string | string[])*: A list of connector index names to fetch connector documents for +** *`connector_name` (Optional, string | string[])*: A list of connector names to fetch connector documents for +** *`service_type` (Optional, string | string[])*: A list of connector service types to fetch connector documents for +** *`include_deleted` (Optional, boolean)*: A flag to indicate if the desired connector should be fetched, even if it was soft-deleted. +** *`query` (Optional, string)*: A wildcard query string that filters connectors with matching name, description or index name + +[discrete] +==== post +Create a connector. + +Connectors are Elasticsearch integrations that bring content from third-party data sources, which can be deployed on Elastic Cloud or hosted on your own infrastructure. +Elastic managed connectors (Native connectors) are a managed service on Elastic Cloud. +Self-managed connectors (Connector clients) are self-managed on your infrastructure. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-put[Endpoint documentation] +[source,ts] +---- +client.connector.post({ ... }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`description` (Optional, string)* +** *`index_name` (Optional, string)* +** *`is_native` (Optional, boolean)* +** *`language` (Optional, string)* +** *`name` (Optional, string)* +** *`service_type` (Optional, string)* + +[discrete] +==== put +Create or update a connector. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-put[Endpoint documentation] +[source,ts] +---- +client.connector.put({ ... }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`connector_id` (Optional, string)*: The unique identifier of the connector to be created or updated. ID is auto-generated if not provided. +** *`description` (Optional, string)* +** *`index_name` (Optional, string)* +** *`is_native` (Optional, boolean)* +** *`language` (Optional, string)* +** *`name` (Optional, string)* +** *`service_type` (Optional, string)* + +[discrete] +==== sync_job_cancel +Cancel a connector sync job. + +Cancel a connector sync job, which sets the status to cancelling and updates `cancellation_requested_at` to the current time. +The connector service is then responsible for setting the status of connector sync jobs to cancelled. 
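+
+For example (a sketch; the sync job ID is hypothetical):
+[source,ts]
+----
+// Request cancellation of a running sync job.
+await client.connector.syncJobCancel({ connector_sync_job_id: 'my-sync-job-id' })
+----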
+ +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-sync-job-cancel[Endpoint documentation] +[source,ts] +---- +client.connector.syncJobCancel({ connector_sync_job_id }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`connector_sync_job_id` (string)*: The unique identifier of the connector sync job + +[discrete] +==== sync_job_check_in +Check in a connector sync job. +Check in a connector sync job and set the `last_seen` field to the current time before updating it in the internal index. + +To sync data using self-managed connectors, you need to deploy the Elastic connector service on your own infrastructure. +This service runs automatically on Elastic Cloud for Elastic managed connectors. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-sync-job-check-in[Endpoint documentation] +[source,ts] +---- +client.connector.syncJobCheckIn({ connector_sync_job_id }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`connector_sync_job_id` (string)*: The unique identifier of the connector sync job to be checked in. + +[discrete] +==== sync_job_claim +Claim a connector sync job. +This action updates the job status to `in_progress` and sets the `last_seen` and `started_at` timestamps to the current time. +Additionally, it can set the `sync_cursor` property for the sync job. + +This API is not intended for direct connector management by users. +It supports the implementation of services that utilize the connector protocol to communicate with Elasticsearch. + +To sync data using self-managed connectors, you need to deploy the Elastic connector service on your own infrastructure. +This service runs automatically on Elastic Cloud for Elastic managed connectors. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-sync-job-claim[Endpoint documentation] +[source,ts] +---- +client.connector.syncJobClaim({ connector_sync_job_id, worker_hostname }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`connector_sync_job_id` (string)*: The unique identifier of the connector sync job. +** *`worker_hostname` (string)*: The host name of the current system that will run the job. +** *`sync_cursor` (Optional, User-defined value)*: The cursor object from the last incremental sync job. +This should reference the `sync_cursor` field in the connector state for which the job runs. + +[discrete] +==== sync_job_delete +Delete a connector sync job. + +Remove a connector sync job and its associated data. +This is a destructive action that is not recoverable. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-sync-job-delete[Endpoint documentation] +[source,ts] +---- +client.connector.syncJobDelete({ connector_sync_job_id }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`connector_sync_job_id` (string)*: The unique identifier of the connector sync job to be deleted + +[discrete] +==== sync_job_error +Set a connector sync job error. +Set the `error` field for a connector sync job and set its `status` to `error`. + +To sync data using self-managed connectors, you need to deploy the Elastic connector service on your own infrastructure. +This service runs automatically on Elastic Cloud for Elastic managed connectors. 
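+As an illustrative sketch, reporting a failure might look like this (the job ID and message are placeholders):
+[source,ts]
+----
+// Record the failure message and move the sync job into the `error` status.
+await client.connector.syncJobError({
+  connector_sync_job_id: 'my-sync-job-id',
+  error: 'Data source connection timed out'
+})
+----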
+ +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-sync-job-error[Endpoint documentation] +[source,ts] +---- +client.connector.syncJobError({ connector_sync_job_id, error }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`connector_sync_job_id` (string)*: The unique identifier for the connector sync job. +** *`error` (string)*: The error for the connector sync job error field. + +[discrete] +==== sync_job_get +Get a connector sync job. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-sync-job-get[Endpoint documentation] +[source,ts] +---- +client.connector.syncJobGet({ connector_sync_job_id }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`connector_sync_job_id` (string)*: The unique identifier of the connector sync job + +[discrete] +==== sync_job_list +Get all connector sync jobs. + +Get information about all stored connector sync jobs listed by their creation date in ascending order. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-sync-job-list[Endpoint documentation] +[source,ts] +---- +client.connector.syncJobList({ ... }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`from` (Optional, number)*: Starting offset (default: 0) +** *`size` (Optional, number)*: Specifies a max number of results to get +** *`status` (Optional, Enum("canceling" | "canceled" | "completed" | "error" | "in_progress" | "pending" | "suspended"))*: A sync job status to fetch connector sync jobs for +** *`connector_id` (Optional, string)*: A connector id to fetch connector sync jobs for +** *`job_type` (Optional, Enum("full" | "incremental" | "access_control") | Enum("full" | "incremental" | "access_control")[])*: A list of job types to fetch the sync jobs for + +[discrete] +==== sync_job_post +Create a connector sync job. + +Create a connector sync job document in the internal index and initialize its counters and timestamps with default values. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-sync-job-post[Endpoint documentation] +[source,ts] +---- +client.connector.syncJobPost({ id }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`id` (string)*: The id of the associated connector +** *`job_type` (Optional, Enum("full" | "incremental" | "access_control"))* +** *`trigger_method` (Optional, Enum("on_demand" | "scheduled"))* + +[discrete] +==== sync_job_update_stats +Set the connector sync job stats. +Stats include: `deleted_document_count`, `indexed_document_count`, `indexed_document_volume`, and `total_document_count`. +You can also update `last_seen`. +This API is mainly used by the connector service for updating sync job information. + +To sync data using self-managed connectors, you need to deploy the Elastic connector service on your own infrastructure. +This service runs automatically on Elastic Cloud for Elastic managed connectors. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-sync-job-update-stats[Endpoint documentation] +[source,ts] +---- +client.connector.syncJobUpdateStats({ connector_sync_job_id, deleted_document_count, indexed_document_count, indexed_document_volume }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`connector_sync_job_id` (string)*: The unique identifier of the connector sync job. +** *`deleted_document_count` (number)*: The number of documents the sync job deleted. 
+** *`indexed_document_count` (number)*: The number of documents the sync job indexed. +** *`indexed_document_volume` (number)*: The total size of the data (in MiB) the sync job indexed. +** *`last_seen` (Optional, string | -1 | 0)*: The timestamp to use in the `last_seen` property for the connector sync job. +** *`metadata` (Optional, Record)*: The connector-specific metadata. +** *`total_document_count` (Optional, number)*: The total number of documents in the target index after the sync job finished. + +[discrete] +==== update_active_filtering +Activate the connector draft filter. + +Activates the valid draft filtering for a connector. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-filtering[Endpoint documentation] +[source,ts] +---- +client.connector.updateActiveFiltering({ connector_id }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`connector_id` (string)*: The unique identifier of the connector to be updated + +[discrete] +==== update_api_key_id +Update the connector API key ID. + +Update the `api_key_id` and `api_key_secret_id` fields of a connector. +You can specify the ID of the API key used for authorization and the ID of the connector secret where the API key is stored. +The connector secret ID is required only for Elastic managed (native) connectors. +Self-managed connectors (connector clients) do not use this field. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-api-key-id[Endpoint documentation] +[source,ts] +---- +client.connector.updateApiKeyId({ connector_id }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`connector_id` (string)*: The unique identifier of the connector to be updated +** *`api_key_id` (Optional, string)* +** *`api_key_secret_id` (Optional, string)* + +[discrete] +==== update_configuration +Update the connector configuration. + +Update the configuration field in the connector document. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-configuration[Endpoint documentation] +[source,ts] +---- +client.connector.updateConfiguration({ connector_id }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`connector_id` (string)*: The unique identifier of the connector to be updated +** *`configuration` (Optional, Record)* +** *`values` (Optional, Record)* + +[discrete] +==== update_error +Update the connector error field. + +Set the error field for the connector. +If the error provided in the request body is non-null, the connector’s status is updated to error. +Otherwise, if the error is reset to null, the connector status is updated to connected. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-error[Endpoint documentation] +[source,ts] +---- +client.connector.updateError({ connector_id, error }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`connector_id` (string)*: The unique identifier of the connector to be updated +** *`error` (T | null)* + +[discrete] +==== update_features +Update the connector features. +Update the connector features in the connector document. +This API can be used to control the following aspects of a connector: + +* document-level security +* incremental syncs +* advanced sync rules +* basic sync rules + +Normally, the running connector service automatically manages these features. +However, you can use this API to override the default behavior. 
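+For instance, document-level security might be toggled with a sketch like the one below; the connector ID is a placeholder and the exact feature payload shape should be checked against the connector features type:
+[source,ts]
+----
+// Override the connector service's default feature management (illustrative payload).
+await client.connector.updateFeatures({
+  connector_id: 'my-connector',
+  features: {
+    document_level_security: { enabled: true }
+  }
+})
+----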
+
+To sync data using self-managed connectors, you need to deploy the Elastic connector service on your own infrastructure.
+This service runs automatically on Elastic Cloud for Elastic managed connectors.
+
+https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-features[Endpoint documentation]
+[source,ts]
+----
+client.connector.updateFeatures({ connector_id, features })
+----
+
+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`connector_id` (string)*: The unique identifier of the connector to be updated.
+** *`features` ({ document_level_security, incremental_sync, native_connector_api_keys, sync_rules })*
+
+[discrete]
+==== update_filtering
+Update the connector filtering.
+
+Update the draft filtering configuration of a connector and mark the draft validation state as edited.
+The filtering draft is activated once validated by the running Elastic connector service.
+The filtering property is used to configure sync rules (both basic and advanced) for a connector.
+
+https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-filtering[Endpoint documentation]
+[source,ts]
+----
+client.connector.updateFiltering({ connector_id })
+----
+
+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`connector_id` (string)*: The unique identifier of the connector to be updated
+** *`filtering` (Optional, { active, domain, draft }[])*
+** *`rules` (Optional, { created_at, field, id, order, policy, rule, updated_at, value }[])*
+** *`advanced_snippet` (Optional, { created_at, updated_at, value })*
+
+[discrete]
+==== update_filtering_validation
+Update the connector draft filtering validation.
+
+Update the draft filtering validation info for a connector.
+[source,ts]
+----
+client.connector.updateFilteringValidation({ connector_id, validation })
+----
+
+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`connector_id` (string)*: The unique identifier of the connector to be updated
+** *`validation` ({ errors, state })*
+
+[discrete]
+==== update_index_name
+Update the connector index name.
+
+Update the `index_name` field of a connector, specifying the index where the data ingested by the connector is stored.
+
+https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-index-name[Endpoint documentation]
+[source,ts]
+----
+client.connector.updateIndexName({ connector_id, index_name })
+----
+
+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`connector_id` (string)*: The unique identifier of the connector to be updated
+** *`index_name` (T | null)*
+
+[discrete]
+==== update_name
+Update the connector name and description.
+
+https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-name[Endpoint documentation]
+[source,ts]
+----
+client.connector.updateName({ connector_id })
+----
+
+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`connector_id` (string)*: The unique identifier of the connector to be updated
+** *`name` (Optional, string)*
+** *`description` (Optional, string)*
+
+[discrete]
+==== update_native
+Update the connector is_native flag.
+[source,ts]
+----
+client.connector.updateNative({ connector_id, is_native })
+----
+
+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`connector_id` (string)*: The unique identifier of the connector to be updated
+** *`is_native` (boolean)*
+
+[discrete]
+==== update_pipeline
+Update the connector pipeline.
+
+When you create a new connector, the configuration of an ingest pipeline is populated with default settings.
+
+https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-pipeline[Endpoint documentation]
+[source,ts]
+----
+client.connector.updatePipeline({ connector_id, pipeline })
+----
+
+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`connector_id` (string)*: The unique identifier of the connector to be updated
+** *`pipeline` ({ extract_binary_content, name, reduce_whitespace, run_ml_inference })*
+
+[discrete]
+==== update_scheduling
+Update the connector scheduling.
+
+https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-scheduling[Endpoint documentation]
+[source,ts]
+----
+client.connector.updateScheduling({ connector_id, scheduling })
+----
+
+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`connector_id` (string)*: The unique identifier of the connector to be updated
+** *`scheduling` ({ access_control, full, incremental })*
+
+[discrete]
+==== update_service_type
+Update the connector service type.
+
+https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-service-type[Endpoint documentation]
+[source,ts]
+----
+client.connector.updateServiceType({ connector_id, service_type })
+----
+
+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`connector_id` (string)*: The unique identifier of the connector to be updated
+** *`service_type` (string)*
+
+[discrete]
+==== update_status
+Update the connector status.
+
+https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-status[Endpoint documentation]
+[source,ts]
+----
+client.connector.updateStatus({ connector_id, status })
+----
+
+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`connector_id` (string)*: The unique identifier of the connector to be updated
+** *`status` (Enum("created" | "needs_configuration" | "configured" | "connected" | "error"))*
+
+[discrete]
+=== dangling_indices
+[discrete]
+==== delete_dangling_index
+Delete a dangling index.
+If Elasticsearch encounters index data that is absent from the current cluster state, those indices are considered to be dangling.
+For example, this can happen if you delete more than `cluster.indices.tombstones.size` indices while an Elasticsearch node is offline.
+
+https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-dangling-indices-delete-dangling-index[Endpoint documentation]
+[source,ts]
+----
+client.danglingIndices.deleteDanglingIndex({ index_uuid, accept_data_loss })
+----
+
+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`index_uuid` (string)*: The UUID of the index to delete. Use the get dangling indices API to find the UUID.
+** *`accept_data_loss` (boolean)*: This parameter must be set to true to acknowledge that it will no longer be possible to recover data from the dangling index.
+** *`master_timeout` (Optional, string | -1 | 0)*: Specify timeout for connection to master
+** *`timeout` (Optional, string | -1 | 0)*: Explicit operation timeout
+
+[discrete]
+==== import_dangling_index
+Import a dangling index.
+
+If Elasticsearch encounters index data that is absent from the current cluster state, those indices are considered to be dangling.
+For example, this can happen if you delete more than `cluster.indices.tombstones.size` indices while an Elasticsearch node is offline.
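+As a sketch, importing a dangling index might look like this, with a placeholder UUID taken from the list dangling indices API:
+[source,ts]
+----
+// Importing requires explicitly accepting potential data loss.
+await client.danglingIndices.importDanglingIndex({
+  index_uuid: 'zmM4e0JtBkeUjiHD-MihPQ',
+  accept_data_loss: true
+})
+----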
+ +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-dangling-indices-import-dangling-index[Endpoint documentation] +[source,ts] +---- +client.danglingIndices.importDanglingIndex({ index_uuid, accept_data_loss }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`index_uuid` (string)*: The UUID of the index to import. Use the get dangling indices API to locate the UUID. +** *`accept_data_loss` (boolean)*: This parameter must be set to true to import a dangling index. +Because Elasticsearch cannot know where the dangling index data came from or determine which shard copies are fresh and which are stale, it cannot guarantee that the imported data represents the latest state of the index when it was last in the cluster. +** *`master_timeout` (Optional, string | -1 | 0)*: Specify timeout for connection to master +** *`timeout` (Optional, string | -1 | 0)*: Explicit operation timeout + +[discrete] +==== list_dangling_indices +Get the dangling indices. + +If Elasticsearch encounters index data that is absent from the current cluster state, those indices are considered to be dangling. +For example, this can happen if you delete more than `cluster.indices.tombstones.size` indices while an Elasticsearch node is offline. + +Use this API to list dangling indices, which you can then import or delete. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-dangling-indices-list-dangling-indices[Endpoint documentation] +[source,ts] +---- +client.danglingIndices.listDanglingIndices() +---- + + +[discrete] +=== enrich +[discrete] +==== delete_policy +Delete an enrich policy. +Deletes an existing enrich policy and its enrich index. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-enrich-delete-policy[Endpoint documentation] +[source,ts] +---- +client.enrich.deletePolicy({ name }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`name` (string)*: Enrich policy to delete. +** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. + +[discrete] +==== execute_policy +Run an enrich policy. +Create the enrich index for an existing enrich policy. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-enrich-execute-policy[Endpoint documentation] +[source,ts] +---- +client.enrich.executePolicy({ name }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`name` (string)*: Enrich policy to execute. +** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. +** *`wait_for_completion` (Optional, boolean)*: If `true`, the request blocks other enrich policy execution requests until complete. + +[discrete] +==== get_policy +Get an enrich policy. +Returns information about an enrich policy. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-enrich-get-policy[Endpoint documentation] +[source,ts] +---- +client.enrich.getPolicy({ ... }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`name` (Optional, string | string[])*: List of enrich policy names used to limit the request. +To return information for all enrich policies, omit this parameter. +** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. + +[discrete] +==== put_policy +Create an enrich policy. +Creates an enrich policy. 
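+For example, a `match` policy might be created with a sketch like this (policy, index, and field names are placeholders):
+[source,ts]
+----
+// Enrich incoming documents with name fields looked up by email address.
+await client.enrich.putPolicy({
+  name: 'users-policy',
+  match: {
+    indices: 'users',
+    match_field: 'email',
+    enrich_fields: ['first_name', 'last_name']
+  }
+})
+----
+The policy must still be executed with `client.enrich.executePolicy` before its enrich index exists and can be used.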
+ +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-enrich-put-policy[Endpoint documentation] +[source,ts] +---- +client.enrich.putPolicy({ name }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`name` (string)*: Name of the enrich policy to create or update. +** *`geo_match` (Optional, { enrich_fields, indices, match_field, query, name, elasticsearch_version })*: Matches enrich data to incoming documents based on a `geo_shape` query. +** *`match` (Optional, { enrich_fields, indices, match_field, query, name, elasticsearch_version })*: Matches enrich data to incoming documents based on a `term` query. +** *`range` (Optional, { enrich_fields, indices, match_field, query, name, elasticsearch_version })*: Matches a number, date, or IP address in incoming documents to a range in the enrich index based on a `term` query. +** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. + +[discrete] +==== stats +Get enrich stats. +Returns enrich coordinator statistics and information about enrich policies that are currently executing. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-enrich-stats[Endpoint documentation] +[source,ts] +---- +client.enrich.stats({ ... }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. + +[discrete] +=== eql +[discrete] +==== delete +Delete an async EQL search. +Delete an async EQL search or a stored synchronous EQL search. +The API also deletes results for the search. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-eql-delete[Endpoint documentation] +[source,ts] +---- +client.eql.delete({ id }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`id` (string)*: Identifier for the search to delete. +A search ID is provided in the EQL search API's response for an async search. +A search ID is also provided if the request’s `keep_on_completion` parameter is `true`. + +[discrete] +==== get +Get async EQL search results. +Get the current status and available results for an async EQL search or a stored synchronous EQL search. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-eql-get[Endpoint documentation] +[source,ts] +---- +client.eql.get({ id }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`id` (string)*: Identifier for the search. +** *`keep_alive` (Optional, string | -1 | 0)*: Period for which the search and its results are stored on the cluster. +Defaults to the keep_alive value set by the search’s EQL search API request. +** *`wait_for_completion_timeout` (Optional, string | -1 | 0)*: Timeout duration to wait for the request to finish. +Defaults to no timeout, meaning the request waits for complete search results. + +[discrete] +==== get_status +Get the async EQL status. +Get the current status for an async EQL search or a stored synchronous EQL search without returning results. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-eql-get-status[Endpoint documentation] +[source,ts] +---- +client.eql.getStatus({ id }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`id` (string)*: Identifier for the search. + +[discrete] +==== search +Get EQL search results. +Returns search results for an Event Query Language (EQL) query. +EQL assumes each document in a data stream or index corresponds to an event. 
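+As a brief sketch, an EQL query over a placeholder index pattern might look like this:
+[source,ts]
+----
+// Find process events by name; the index pattern and query are illustrative only.
+const response = await client.eql.search({
+  index: 'my-logs-*',
+  query: 'process where process.name == "regsvr32.exe"',
+  size: 10
+})
+console.log(response.hits)
+----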
+ +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-eql-search[Endpoint documentation] +[source,ts] +---- +client.eql.search({ index, query }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`index` (string | string[])*: The name of the index to scope the operation +** *`query` (string)*: EQL query you wish to run. +** *`case_sensitive` (Optional, boolean)* +** *`event_category_field` (Optional, string)*: Field containing the event classification, such as process, file, or network. +** *`tiebreaker_field` (Optional, string)*: Field used to sort hits with the same timestamp in ascending order +** *`timestamp_field` (Optional, string)*: Field containing event timestamp. Default "@timestamp" +** *`fetch_size` (Optional, number)*: Maximum number of events to search at a time for sequence queries. +** *`filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type } | { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type }[])*: Query, written in Query DSL, used to filter the events on which the EQL query runs. +** *`keep_alive` (Optional, string | -1 | 0)* +** *`keep_on_completion` (Optional, boolean)* +** *`wait_for_completion_timeout` (Optional, string | -1 | 0)* +** *`allow_partial_search_results` (Optional, boolean)*: Allow query execution also in case of shard failures. +If true, the query will keep running and will return results based on the available shards. +For sequences, the behavior can be further refined using allow_partial_sequence_results +** *`allow_partial_sequence_results` (Optional, boolean)*: This flag applies only to sequences and has effect only if allow_partial_search_results=true. +If true, the sequence query will return results based on the available shards, ignoring the others. +If false, the sequence query will return successfully, but will always have empty results. +** *`size` (Optional, number)*: For basic queries, the maximum number of matching events to return. Defaults to 10 +** *`fields` (Optional, { field, format, include_unmapped } | { field, format, include_unmapped }[])*: Array of wildcard (*) patterns. The response returns values for field names matching these patterns in the fields property of each hit. 
+** *`result_position` (Optional, Enum("tail" | "head"))* +** *`runtime_mappings` (Optional, Record)* +** *`max_samples_per_key` (Optional, number)*: By default, the response of a sample query contains up to `10` samples, with one sample per unique set of join keys. Use the `size` +parameter to get a smaller or larger set of samples. To retrieve more than one sample per set of join keys, use the +`max_samples_per_key` parameter. Pipes are not supported for sample queries. +** *`allow_no_indices` (Optional, boolean)* +** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])* +** *`ignore_unavailable` (Optional, boolean)*: If true, missing or closed indices are not included in the response. + +[discrete] +=== esql +[discrete] +==== async_query +Run an async ES|QL query. +Asynchronously run an ES|QL (Elasticsearch query language) query, monitor its progress, and retrieve results when they become available. + +The API accepts the same parameters and request body as the synchronous query API, along with additional async related properties. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-esql-async-query[Endpoint documentation] +[source,ts] +---- +client.esql.asyncQuery({ query }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`query` (string)*: The ES|QL query API accepts an ES|QL query string in the query parameter, runs it, and returns the results. +** *`columnar` (Optional, boolean)*: By default, ES|QL returns results as rows. For example, FROM returns each individual document as one row. For the JSON, YAML, CBOR and smile formats, ES|QL can return the results in a columnar fashion where one row represents all the values of a certain column in the results. +** *`filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Specify a Query DSL query in the filter parameter to filter the set of documents that an ES|QL query runs on. +** *`locale` (Optional, string)* +** *`params` (Optional, number | number | string | boolean | null[])*: To avoid any attempts of hacking or code injection, extract the values in a separate list of parameters. Use question mark placeholders (?) in the query string for each of the parameters. +** *`profile` (Optional, boolean)*: If provided and `true` the response will include an extra `profile` object +with information on how the query was executed. This information is for human debugging +and its format can change at any time but it can give some insight into the performance +of each part of the query. +** *`tables` (Optional, Record>)*: Tables to use with the LOOKUP operation. The top level key is the table +name and the next level key is the column name. 
+** *`include_ccs_metadata` (Optional, boolean)*: When set to `true` and performing a cross-cluster query, the response will include an extra `_clusters`
+object with information about the clusters that participated in the search along with info such as shards
+count.
+** *`wait_for_completion_timeout` (Optional, string | -1 | 0)*: The period to wait for the request to finish.
+By default, the request waits for 1 second for the query results.
+If the query completes during this period, results are returned.
+Otherwise, a query ID is returned that can later be used to retrieve the results.
+** *`delimiter` (Optional, string)*: The character to use between values within a CSV row.
+It is valid only for the CSV format.
+** *`drop_null_columns` (Optional, boolean)*: Indicates whether columns that are entirely `null` will be removed from the `columns` and `values` portion of the results.
+If `true`, the response will include an extra section under the name `all_columns` which has the name of all the columns.
+** *`format` (Optional, Enum("csv" | "json" | "tsv" | "txt" | "yaml" | "cbor" | "smile" | "arrow"))*: A short version of the Accept header, for example `json` or `yaml`.
+** *`keep_alive` (Optional, string | -1 | 0)*: The period for which the query and its results are stored in the cluster.
+The default period is five days.
+When this period expires, the query and its results are deleted, even if the query is still ongoing.
+If the `keep_on_completion` parameter is false, Elasticsearch only stores async queries that do not complete within the period set by the `wait_for_completion_timeout` parameter, regardless of this value.
+** *`keep_on_completion` (Optional, boolean)*: Indicates whether the query and its results are stored in the cluster.
+If false, the query and its results are stored in the cluster only if the request does not complete during the period set by the `wait_for_completion_timeout` parameter.
+
+[discrete]
+==== async_query_delete
+Delete an async ES|QL query.
+If the query is still running, it is cancelled.
+Otherwise, the stored results are deleted.
+
+If the Elasticsearch security features are enabled, only the following users can use this API to delete a query:
+
+* The authenticated user that submitted the original query request
+* Users with the `cancel_task` cluster privilege
+
+https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-esql-async-query-delete[Endpoint documentation]
+[source,ts]
+----
+client.esql.asyncQueryDelete({ id })
+----
+
+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`id` (string)*: The unique identifier of the query.
+A query ID is provided in the ES|QL async query API response for a query that does not complete in the designated time.
+A query ID is also provided when the request was submitted with the `keep_on_completion` parameter set to `true`.
+
+[discrete]
+==== async_query_get
+Get async ES|QL query results.
+Get the current status and available results or stored results for an ES|QL asynchronous query.
+If the Elasticsearch security features are enabled, only the user who first submitted the ES|QL query can retrieve the results using this API.
+
+https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-esql-async-query-get[Endpoint documentation]
+[source,ts]
+----
+client.esql.asyncQueryGet({ id })
+----
+
+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`id` (string)*: The unique identifier of the query.
+A query ID is provided in the ES|QL async query API response for a query that does not complete in the designated time. +A query ID is also provided when the request was submitted with the `keep_on_completion` parameter set to `true`. +** *`drop_null_columns` (Optional, boolean)*: Indicates whether columns that are entirely `null` will be removed from the `columns` and `values` portion of the results. +If `true`, the response will include an extra section under the name `all_columns` which has the name of all the columns. +** *`keep_alive` (Optional, string | -1 | 0)*: The period for which the query and its results are stored in the cluster. +When this period expires, the query and its results are deleted, even if the query is still ongoing. +** *`wait_for_completion_timeout` (Optional, string | -1 | 0)*: The period to wait for the request to finish. +By default, the request waits for complete query results. +If the request completes during the period specified in this parameter, complete query results are returned. +Otherwise, the response returns an `is_running` value of `true` and no results. + +[discrete] +==== async_query_stop +Stop async ES|QL query. + +This API interrupts the query execution and returns the results so far. +If the Elasticsearch security features are enabled, only the user who first submitted the ES|QL query can stop it. + +{ref}/esql-async-query-stop-api.html[Endpoint documentation] +[source,ts] +---- +client.esql.asyncQueryStop({ id }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`id` (string)*: The unique identifier of the query. +A query ID is provided in the ES|QL async query API response for a query that does not complete in the designated time. +A query ID is also provided when the request was submitted with the `keep_on_completion` parameter set to `true`. +** *`drop_null_columns` (Optional, boolean)*: Indicates whether columns that are entirely `null` will be removed from the `columns` and `values` portion of the results. +If `true`, the response will include an extra section under the name `all_columns` which has the name of all the columns. + +[discrete] +==== query +Run an ES|QL query. +Get search results for an ES|QL (Elasticsearch query language) query. + +{ref}/esql-rest.html[Endpoint documentation] +[source,ts] +---- +client.esql.query({ query }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`query` (string)*: The ES|QL query API accepts an ES|QL query string in the query parameter, runs it, and returns the results. +** *`columnar` (Optional, boolean)*: By default, ES|QL returns results as rows. For example, FROM returns each individual document as one row. For the JSON, YAML, CBOR and smile formats, ES|QL can return the results in a columnar fashion where one row represents all the values of a certain column in the results. 
+** *`filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Specify a Query DSL query in the filter parameter to filter the set of documents that an ES|QL query runs on. +** *`locale` (Optional, string)* +** *`params` (Optional, number | number | string | boolean | null[])*: To avoid any attempts of hacking or code injection, extract the values in a separate list of parameters. Use question mark placeholders (?) in the query string for each of the parameters. +** *`profile` (Optional, boolean)*: If provided and `true` the response will include an extra `profile` object +with information on how the query was executed. This information is for human debugging +and its format can change at any time but it can give some insight into the performance +of each part of the query. +** *`tables` (Optional, Record>)*: Tables to use with the LOOKUP operation. The top level key is the table +name and the next level key is the column name. +** *`include_ccs_metadata` (Optional, boolean)*: When set to `true` and performing a cross-cluster query, the response will include an extra `_clusters` +object with information about the clusters that participated in the search along with info such as shards +count. +** *`format` (Optional, Enum("csv" | "json" | "tsv" | "txt" | "yaml" | "cbor" | "smile" | "arrow"))*: A short version of the Accept header, e.g. json, yaml. +** *`delimiter` (Optional, string)*: The character to use between values within a CSV row. Only valid for the CSV format. +** *`drop_null_columns` (Optional, boolean)*: Should columns that are entirely `null` be removed from the `columns` and `values` portion of the results? +Defaults to `false`. If `true` then the response will include an extra section under the name `all_columns` which has the name of all columns. + +[discrete] +=== features +[discrete] +==== get_features +Get the features. +Get a list of features that can be included in snapshots using the `feature_states` field when creating a snapshot. +You can use this API to determine which feature states to include when taking a snapshot. +By default, all feature states are included in a snapshot if that snapshot includes the global state, or none if it does not. + +A feature state includes one or more system indices necessary for a given feature to function. +In order to ensure data integrity, all system indices that comprise a feature state are snapshotted and restored together. + +The features listed by this API are a combination of built-in features and features defined by plugins. +In order for a feature state to be listed in this API and recognized as a valid feature state by the create snapshot API, the plugin that defines that feature must be installed on the master node. 
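+A minimal call is sketched below; the response lists the features whose states can be referenced in the `feature_states` field when creating a snapshot:
+[source,ts]
+----
+// List the feature states known to the cluster.
+const response = await client.features.getFeatures()
+console.log(response.features)
+----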
+
+https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-features-get-features[Endpoint documentation]
+[source,ts]
+----
+client.features.getFeatures({ ... })
+----
+
+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node.
+
+[discrete]
+==== reset_features
+Reset the features.
+Clear all of the state information stored in system indices by Elasticsearch features, including the security and machine learning indices.
+
+WARNING: Intended for development and testing use only. Do not reset features on a production cluster.
+
+Return a cluster to the same state as a new installation by resetting the feature state for all Elasticsearch features.
+This deletes all state information stored in system indices.
+
+The response code is HTTP 200 if the state is successfully reset for all features.
+It is HTTP 500 if the reset operation failed for any feature.
+
+Note that select features might provide a way to reset particular system indices.
+Using this API resets all features, both those that are built-in and implemented as plugins.
+
+To list the features that will be affected, use the get features API.
+
+IMPORTANT: The features installed on the node you submit this request to are the features that will be reset. Run on the master node if you have any doubts about which plugins are installed on individual nodes.
+
+https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-features-reset-features[Endpoint documentation]
+[source,ts]
+----
+client.features.resetFeatures({ ... })
+----
+
+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node.
+
+[discrete]
+=== fleet
+[discrete]
+==== global_checkpoints
+Get global checkpoints.
+
+Get the current global checkpoints for an index.
+This API is designed for internal use by the Fleet server project.
+
+https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-fleet[Endpoint documentation]
+[source,ts]
+----
+client.fleet.globalCheckpoints({ index })
+----
+
+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`index` (string | string)*: A single index or index alias that resolves to a single index.
+** *`wait_for_advance` (Optional, boolean)*: A boolean value which controls whether to wait (until the timeout) for the global checkpoints
+to advance past the provided `checkpoints`.
+** *`wait_for_index` (Optional, boolean)*: A boolean value which controls whether to wait (until the timeout) for the target index to exist
+and all primary shards to be active. Can only be true when `wait_for_advance` is true.
+** *`checkpoints` (Optional, number[])*: A comma separated list of previous global checkpoints. When used in combination with `wait_for_advance`,
+the API will only return once the global checkpoints advance past the checkpoints. Providing an empty list
+will cause Elasticsearch to immediately return the current global checkpoints.
+** *`timeout` (Optional, string | -1 | 0)*: Period to wait for the global checkpoints to advance past `checkpoints`.
+
+[discrete]
+==== msearch
+Run multiple Fleet searches.
+Run several Fleet searches with a single API request.
+The API follows the same structure as the multi search API.
+However, similar to the Fleet search API, it supports the `wait_for_checkpoints` parameter.
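+As a sketch, a single search that waits on a checkpoint might look like this (the index name and checkpoint value are placeholders):
+[source,ts]
+----
+// Each search is a header object followed by a body object, as in the multi search API.
+const response = await client.fleet.msearch({
+  index: 'my-index',
+  wait_for_checkpoints: [2],
+  searches: [
+    {}, // search header; the request-level index is used
+    { query: { match_all: {} } } // search body
+  ]
+})
+----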
+ +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-fleet-msearch[Endpoint documentation] +[source,ts] +---- +client.fleet.msearch({ ... }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`index` (Optional, string | string)*: A single target to search. If the target is an index alias, it must resolve to a single index. +** *`searches` (Optional, { allow_no_indices, expand_wildcards, ignore_unavailable, index, preference, request_cache, routing, search_type, ccs_minimize_roundtrips, allow_partial_search_results, ignore_throttled } | { aggregations, collapse, query, explain, ext, stored_fields, docvalue_fields, knn, from, highlight, indices_boost, min_score, post_filter, profile, rescore, script_fields, search_after, size, sort, _source, fields, terminate_after, stats, timeout, track_scores, track_total_hits, version, runtime_mappings, seq_no_primary_term, pit, suggest }[])* +** *`allow_no_indices` (Optional, boolean)*: If false, the request returns an error if any wildcard expression, index alias, or _all value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting foo*,bar* returns an error if an index starts with foo but no index starts with bar. +** *`ccs_minimize_roundtrips` (Optional, boolean)*: If true, network roundtrips between the coordinating node and remote clusters are minimized for cross-cluster search requests. +** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of index that wildcard expressions can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. +** *`ignore_throttled` (Optional, boolean)*: If true, concrete, expanded or aliased indices are ignored when frozen. +** *`ignore_unavailable` (Optional, boolean)*: If true, missing or closed indices are not included in the response. +** *`max_concurrent_searches` (Optional, number)*: Maximum number of concurrent searches the multi search API can execute. +** *`max_concurrent_shard_requests` (Optional, number)*: Maximum number of concurrent shard requests that each sub-search request executes per node. +** *`pre_filter_shard_size` (Optional, number)*: Defines a threshold that enforces a pre-filter roundtrip to prefilter search shards based on query rewriting if the number of shards the search request expands to exceeds the threshold. This filter roundtrip can limit the number of shards significantly if for instance a shard can not match any documents based on its rewrite method i.e., if date filters are mandatory to match but the shard bounds and the query are disjoint. +** *`search_type` (Optional, Enum("query_then_fetch" | "dfs_query_then_fetch"))*: Indicates whether global term and document frequencies should be used when scoring returned documents. +** *`rest_total_hits_as_int` (Optional, boolean)*: If true, hits.total are returned as an integer in the response. Defaults to false, which returns an object. +** *`typed_keys` (Optional, boolean)*: Specifies whether aggregation and suggester names should be prefixed by their respective types in the response. +** *`wait_for_checkpoints` (Optional, number[])*: A comma separated list of checkpoints. When configured, the search API will only be executed on a shard +after the relevant checkpoint has become visible for search. 
Defaults to an empty list which will cause +Elasticsearch to immediately execute the search. +** *`allow_partial_search_results` (Optional, boolean)*: If true, returns partial results if there are shard request timeouts or [shard failures](https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-replication.html#shard-failures). If false, returns +an error with no partial results. Defaults to the configured cluster setting `search.default_allow_partial_results` +which is true by default. + +[discrete] +==== search +Run a Fleet search. +The purpose of the Fleet search API is to provide an API where the search will be run only +after the provided checkpoint has been processed and is visible for searches inside of Elasticsearch. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-fleet-search[Endpoint documentation] +[source,ts] +---- +client.fleet.search({ index }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`index` (string | string)*: A single target to search. If the target is an index alias, it must resolve to a single index. +** *`aggregations` (Optional, Record)* +** *`collapse` (Optional, { field, inner_hits, max_concurrent_group_searches, collapse })* +** *`explain` (Optional, boolean)*: If true, returns detailed information about score computation as part of a hit. +** *`ext` (Optional, Record)*: Configuration of search extensions defined by Elasticsearch plugins. +** *`from` (Optional, number)*: Starting document offset. By default, you cannot page through more than 10,000 +hits using the from and size parameters. To page through more hits, use the +search_after parameter. +** *`highlight` (Optional, { encoder, fields })* +** *`track_total_hits` (Optional, boolean | number)*: Number of hits matching the query to count accurately. If true, the exact +number of hits is returned at the cost of some performance. If false, the +response does not include the total number of hits matching the query. +Defaults to 10,000 hits. +** *`indices_boost` (Optional, Record[])*: Boosts the _score of documents from specified indices. +** *`docvalue_fields` (Optional, { field, format, include_unmapped }[])*: Array of wildcard (*) patterns. The request returns doc values for field +names matching these patterns in the hits.fields property of the response. +** *`min_score` (Optional, number)*: Minimum _score for matching documents. Documents with a lower _score are +not included in the search results. 
+** *`post_filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })* +** *`profile` (Optional, boolean)* +** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Defines the search definition using the Query DSL. +** *`rescore` (Optional, { window_size, query, learning_to_rank } | { window_size, query, learning_to_rank }[])* +** *`script_fields` (Optional, Record)*: Retrieve a script evaluation (based on different fields) for each hit. +** *`search_after` (Optional, number | number | string | boolean | null[])* +** *`size` (Optional, number)*: The number of hits to return. By default, you cannot page through more +than 10,000 hits using the from and size parameters. To page through more +hits, use the search_after parameter. +** *`slice` (Optional, { field, id, max })* +** *`sort` (Optional, string | { _score, _doc, _geo_distance, _script } | string | { _score, _doc, _geo_distance, _script }[])* +** *`_source` (Optional, boolean | { excludes, includes })*: Indicates which source fields are returned for matching documents. These +fields are returned in the hits._source property of the search response. +** *`fields` (Optional, { field, format, include_unmapped }[])*: Array of wildcard (*) patterns. The request returns values for field names +matching these patterns in the hits.fields property of the response. +** *`suggest` (Optional, { text })* +** *`terminate_after` (Optional, number)*: Maximum number of documents to collect for each shard. If a query reaches this +limit, Elasticsearch terminates the query early. Elasticsearch collects documents +before sorting. Defaults to 0, which does not terminate query execution early. +** *`timeout` (Optional, string)*: Specifies the period of time to wait for a response from each shard. If no response +is received before the timeout expires, the request fails and returns an error. +Defaults to no timeout. +** *`track_scores` (Optional, boolean)*: If true, calculate and return document scores, even if the scores are not used for sorting. +** *`version` (Optional, boolean)*: If true, returns document version as part of a hit. 
+** *`seq_no_primary_term` (Optional, boolean)*: If true, returns sequence number and primary term of the last modification
+of each hit. See Optimistic concurrency control.
+** *`stored_fields` (Optional, string | string[])*: List of stored fields to return as part of a hit. If no fields are specified,
+no stored fields are included in the response. If this field is specified, the _source
+parameter defaults to false. You can pass _source: true to return both source fields
+and stored fields in the search response.
+** *`pit` (Optional, { id, keep_alive })*: Limits the search to a point in time (PIT). If you provide a PIT, you
+cannot specify an index in the request path.
+** *`runtime_mappings` (Optional, Record)*: Defines one or more runtime fields in the search request. These fields take
+precedence over mapped fields with the same name.
+** *`stats` (Optional, string[])*: Stats groups to associate with the search. Each group maintains a statistics
+aggregation for its associated searches. You can retrieve these stats using
+the indices stats API.
+** *`allow_no_indices` (Optional, boolean)*
+** *`analyzer` (Optional, string)*
+** *`analyze_wildcard` (Optional, boolean)*
+** *`batched_reduce_size` (Optional, number)*
+** *`ccs_minimize_roundtrips` (Optional, boolean)*
+** *`default_operator` (Optional, Enum("and" | "or"))*
+** *`df` (Optional, string)*
+** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*
+** *`ignore_throttled` (Optional, boolean)*
+** *`ignore_unavailable` (Optional, boolean)*
+** *`lenient` (Optional, boolean)*
+** *`max_concurrent_shard_requests` (Optional, number)*
+** *`preference` (Optional, string)*
+** *`pre_filter_shard_size` (Optional, number)*
+** *`request_cache` (Optional, boolean)*
+** *`routing` (Optional, string)*
+** *`scroll` (Optional, string | -1 | 0)*
+** *`search_type` (Optional, Enum("query_then_fetch" | "dfs_query_then_fetch"))*
+** *`suggest_field` (Optional, string)*: Specifies which field to use for suggestions.
+** *`suggest_mode` (Optional, Enum("missing" | "popular" | "always"))*
+** *`suggest_size` (Optional, number)*
+** *`suggest_text` (Optional, string)*: The source text for which the suggestions should be returned.
+** *`typed_keys` (Optional, boolean)*
+** *`rest_total_hits_as_int` (Optional, boolean)*
+** *`_source_excludes` (Optional, string | string[])*
+** *`_source_includes` (Optional, string | string[])*
+** *`q` (Optional, string)*
+** *`wait_for_checkpoints` (Optional, number[])*: A comma separated list of checkpoints. When configured, the search API will only be executed on a shard
+after the relevant checkpoint has become visible for search. Defaults to an empty list which will cause
+Elasticsearch to immediately execute the search.
+** *`allow_partial_search_results` (Optional, boolean)*: If true, returns partial results if there are shard request timeouts or [shard failures](https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-replication.html#shard-failures). If false, returns
+an error with no partial results. Defaults to the configured cluster setting `search.default_allow_partial_results`
+which is true by default.
+
+[discrete]
+=== graph
+[discrete]
+==== explore
+Explore graph analytics.
+Extract and summarize information about the documents and terms in an Elasticsearch data stream or index.
+The easiest way to understand the behavior of this API is to use the Graph UI to explore connections.
+An initial request to the `_explore` API contains a seed query that identifies the documents of interest and specifies the fields that define the vertices and connections you want to include in the graph.
+Subsequent requests enable you to spider out from one or more vertices of interest.
+You can exclude vertices that have already been returned.
+
+https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-graph[Endpoint documentation]
+[source,ts]
+----
+client.graph.explore({ index })
+----
+
+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`index` (string | string[])*: Name of the index.
+** *`connections` (Optional, { connections, query, vertices })*: Specifies one or more fields from which you want to extract terms that are associated with the specified vertices.
+** *`controls` (Optional, { sample_diversity, sample_size, timeout, use_significance })*: Direct the Graph API how to build the graph.
+** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: A seed query that identifies the documents of interest. Can be any valid Elasticsearch query.
+** *`vertices` (Optional, { exclude, field, include, min_doc_count, shard_min_doc_count, size }[])*: Specifies one or more fields that contain the terms you want to include in the graph as vertices.
+** *`routing` (Optional, string)*: Custom value used to route operations to a specific shard.
+** *`timeout` (Optional, string | -1 | 0)*: Specifies the period of time to wait for a response from each shard.
+If no response is received before the timeout expires, the request fails and returns an error.
+Defaults to no timeout.
+
+[discrete]
+=== ilm
+[discrete]
+==== delete_lifecycle
+Delete a lifecycle policy.
+You cannot delete policies that are currently in use. If the policy is being used to manage any indices, the request fails and returns an error.
+
+https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-delete-lifecycle[Endpoint documentation]
+[source,ts]
+----
+client.ilm.deleteLifecycle({ policy })
+----
+
+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`policy` (string)*: Identifier for the policy.
+** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.
+** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.
+
+[discrete]
+==== explain_lifecycle
+Explain the lifecycle state.
+Get the current lifecycle status for one or more indices.
+For data streams, the API retrieves the current lifecycle status for the stream's backing indices.
+ +The response indicates when the index entered each lifecycle state, provides the definition of the running phase, and information about any failures. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-explain-lifecycle[Endpoint documentation] +[source,ts] +---- +client.ilm.explainLifecycle({ index }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`index` (string)*: List of data streams, indices, and aliases to target. Supports wildcards (`*`). +To target all data streams and indices, use `*` or `_all`. +** *`only_errors` (Optional, boolean)*: Filters the returned indices to only indices that are managed by ILM and are in an error state, either due to an encountering an error while executing the policy, or attempting to use a policy that does not exist. +** *`only_managed` (Optional, boolean)*: Filters the returned indices to only indices that are managed by ILM. +** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. + +[discrete] +==== get_lifecycle +Get lifecycle policies. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-get-lifecycle[Endpoint documentation] +[source,ts] +---- +client.ilm.getLifecycle({ ... }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`policy` (Optional, string)*: Identifier for the policy. +** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. +** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. + +[discrete] +==== get_status +Get the ILM status. + +Get the current index lifecycle management status. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-get-status[Endpoint documentation] +[source,ts] +---- +client.ilm.getStatus() +---- + + +[discrete] +==== migrate_to_data_tiers +Migrate to data tiers routing. +Switch the indices, ILM policies, and legacy, composable, and component templates from using custom node attributes and attribute-based allocation filters to using data tiers. +Optionally, delete one legacy index template. +Using node roles enables ILM to automatically move the indices between data tiers. + +Migrating away from custom node attributes routing can be manually performed. +This API provides an automated way of performing three out of the four manual steps listed in the migration guide: + +. Stop setting the custom hot attribute on new indices. +. Remove custom allocation settings from existing ILM policies. +. Replace custom allocation settings from existing indices with the corresponding tier preference. + +ILM must be stopped before performing the migration. +Use the stop ILM and get ILM status APIs to wait until the reported operation mode is `STOPPED`. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-migrate-to-data-tiers[Endpoint documentation] +[source,ts] +---- +client.ilm.migrateToDataTiers({ ... 
})
+----
+
+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`legacy_template_to_delete` (Optional, string)*
+** *`node_attribute` (Optional, string)*
+** *`dry_run` (Optional, boolean)*: If true, simulates the migration from node attribute-based allocation filters to data tiers, but does not perform the migration.
+This provides a way to retrieve the indices and ILM policies that need to be migrated.
+** *`master_timeout` (Optional, string | -1 | 0)*: The period to wait for a connection to the master node.
+If no response is received before the timeout expires, the request fails and returns an error.
+It can also be set to `-1` to indicate that the request should never timeout.
+
+[discrete]
+==== move_to_step
+Move to a lifecycle step.
+Manually move an index into a specific step in the lifecycle policy and run that step.
+
+WARNING: This operation can result in the loss of data. Manually moving an index into a specific step runs that step even if it has already been performed. This is a potentially destructive action and it should be considered an expert-level API.
+
+You must specify both the current step and the step to be executed in the body of the request.
+The request will fail if the current step does not match the step currently running for the index.
+This is to prevent the index from being moved from an unexpected step into the next step.
+
+When specifying the target (`next_step`) to which the index will be moved, either the name or both the action and name fields are optional.
+If only the phase is specified, the index will move to the first step of the first action in the target phase.
+If the phase and action are specified, the index will move to the first step of the specified action in the specified phase.
+Only actions specified in the ILM policy are considered valid.
+An index cannot move to a step that is not part of its policy.
+
+https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-move-to-step[Endpoint documentation]
+[source,ts]
+----
+client.ilm.moveToStep({ index, current_step, next_step })
+----
+
+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`index` (string)*: The name of the index whose lifecycle step is to change.
+** *`current_step` ({ action, name, phase })*: The step that the index is expected to be in.
+** *`next_step` ({ action, name, phase })*: The step that you want to run.
+
+[discrete]
+==== put_lifecycle
+Create or update a lifecycle policy.
+If the specified policy exists, it is replaced and the policy version is incremented.
+
+NOTE: Only the latest version of the policy is stored; you cannot revert to previous versions.
+
+https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-put-lifecycle[Endpoint documentation]
+[source,ts]
+----
+client.ilm.putLifecycle({ policy })
+----
+
+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`policy` (string)*: Identifier for the policy.
+** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.
+** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.
+
+[discrete]
+==== remove_policy
+Remove policies from an index.
+Remove the assigned lifecycle policies from an index or a data stream's backing indices.
+It also stops managing the indices.
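+
+For example, a minimal sketch of the call documented below, removing the policy from a single index (the index name is only an illustration):
+
+[source,ts]
+----
+// Stop managing this index with its assigned ILM policy
+const response = await client.ilm.removePolicy({
+  index: 'my-index-000001'
+})
+console.log(response)
+----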
+
+https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-remove-policy[Endpoint documentation]
+[source,ts]
+----
+client.ilm.removePolicy({ index })
+----
+
+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`index` (string)*: The name of the index from which to remove the policy.
+
+[discrete]
+==== retry
+Retry a policy.
+Retry running the lifecycle policy for an index that is in the ERROR step.
+The API sets the policy back to the step where the error occurred and runs the step.
+Use the explain lifecycle state API to determine whether an index is in the ERROR step.
+
+https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-retry[Endpoint documentation]
+[source,ts]
+----
+client.ilm.retry({ index })
+----
+
+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`index` (string)*: The names of the indices (comma-separated) whose failed lifecycle step is to be retried.
+
+[discrete]
+==== start
+Start the ILM plugin.
+Start the index lifecycle management plugin if it is currently stopped.
+ILM is started automatically when the cluster is formed.
+Restarting ILM is necessary only when it has been stopped using the stop ILM API.
+
+https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-start[Endpoint documentation]
+[source,ts]
+----
+client.ilm.start({ ... })
+----
+
+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.
+** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.
+
+[discrete]
+==== stop
+Stop the ILM plugin.
+Halt all lifecycle management operations and stop the index lifecycle management plugin.
+This is useful when you are performing maintenance on the cluster and need to prevent ILM from performing any actions on your indices.
+
+The API returns as soon as the stop request has been acknowledged, but the plugin might continue to run until in-progress operations complete and the plugin can be safely stopped.
+Use the get ILM status API to check whether ILM is running.
+
+https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-stop[Endpoint documentation]
+[source,ts]
+----
+client.ilm.stop({ ... })
+----
+
+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.
+** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.
+
+[discrete]
+=== indices
+[discrete]
+==== add_block
+Add an index block.
+
+Add an index block to an index.
+Index blocks limit the operations allowed on an index by blocking specific operation types.
+
+https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-add-block[Endpoint documentation]
+[source,ts]
+----
+client.indices.addBlock({ index, block })
+----
+
+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`index` (string)*: A list or wildcard expression of index names used to limit the request.
+By default, you must explicitly name the indices you are adding blocks to.
+To allow the adding of blocks to indices with `_all`, `*`, or other wildcard expressions, change the `action.destructive_requires_name` setting to `false`. +You can update this setting in the `elasticsearch.yml` file or by using the cluster update settings API. +** *`block` (Enum("metadata" | "read" | "read_only" | "write"))*: The block type to add to the index. +** *`allow_no_indices` (Optional, boolean)*: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. +This behavior applies even if the request targets other open indices. +For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. +** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: The type of index that wildcard patterns can match. +If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. +It supports a list of values, such as `open,hidden`. +** *`ignore_unavailable` (Optional, boolean)*: If `false`, the request returns an error if it targets a missing or closed index. +** *`master_timeout` (Optional, string | -1 | 0)*: The period to wait for the master node. +If the master node is not available before the timeout expires, the request fails and returns an error. +It can also be set to `-1` to indicate that the request should never timeout. +** *`timeout` (Optional, string | -1 | 0)*: The period to wait for a response from all relevant nodes in the cluster after updating the cluster metadata. +If no response is received before the timeout expires, the cluster metadata update still applies but the response will indicate that it was not completely acknowledged. +It can also be set to `-1` to indicate that the request should never timeout. + +[discrete] +==== analyze +Get tokens from text analysis. +The analyze API performs analysis on a text string and returns the resulting tokens. + +Generating excessive amount of tokens may cause a node to run out of memory. +The `index.analyze.max_token_count` setting enables you to limit the number of tokens that can be produced. +If more than this limit of tokens gets generated, an error occurs. +The `_analyze` endpoint without a specified index will always use `10000` as its limit. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-analyze[Endpoint documentation] +[source,ts] +---- +client.indices.analyze({ ... }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`index` (Optional, string)*: Index used to derive the analyzer. +If specified, the `analyzer` or field parameter overrides this value. +If no index is specified or the index does not have a default analyzer, the analyze API uses the standard analyzer. +** *`analyzer` (Optional, string)*: The name of the analyzer that should be applied to the provided `text`. +This could be a built-in analyzer, or an analyzer that’s been configured in the index. +** *`attributes` (Optional, string[])*: Array of token attributes used to filter the output of the `explain` parameter. +** *`char_filter` (Optional, string | { type, escaped_tags } | { type, mappings, mappings_path } | { type, flags, pattern, replacement } | { type, mode, name } | { type, normalize_kana, normalize_kanji }[])*: Array of character filters used to preprocess characters before the tokenizer. 
+** *`explain` (Optional, boolean)*: If `true`, the response includes token attributes and additional details. +** *`field` (Optional, string)*: Field used to derive the analyzer. +To use this parameter, you must specify an index. +If specified, the `analyzer` parameter overrides this value. +** *`filter` (Optional, string | { type, preserve_original } | { type, common_words, common_words_path, ignore_case, query_mode } | { type, filter, script } | { type, delimiter, encoding } | { type, max_gram, min_gram, side, preserve_original } | { type, articles, articles_path, articles_case } | { type, max_output_size, separator } | { type, dedup, dictionary, locale, longest_only } | { type } | { type, mode, types } | { type, keep_words, keep_words_case, keep_words_path } | { type, ignore_case, keywords, keywords_path, keywords_pattern } | { type } | { type, max, min } | { type, consume_all_tokens, max_token_count } | { type, language } | { type, filters, preserve_original } | { type, max_gram, min_gram, preserve_original } | { type, stoptags } | { type, patterns, preserve_original } | { type, all, flags, pattern, replacement } | { type } | { type, script } | { type } | { type } | { type, filler_token, max_shingle_size, min_shingle_size, output_unigrams, output_unigrams_if_no_shingles, token_separator } | { type, language } | { type, rules, rules_path } | { type, language } | { type, ignore_case, remove_trailing, stopwords, stopwords_path } | { type, expand, format, lenient, synonyms, synonyms_path, synonyms_set, tokenizer, updateable } | { type, expand, format, lenient, synonyms, synonyms_path, synonyms_set, tokenizer, updateable } | { type } | { type, length } | { type, only_on_same_position } | { type } | { type, adjust_offsets, catenate_all, catenate_numbers, catenate_words, generate_number_parts, generate_word_parts, ignore_keywords, preserve_original, protected_words, protected_words_path, split_on_case_change, split_on_numerics, stem_english_possessive, type_table, type_table_path } | { type, catenate_all, catenate_numbers, catenate_words, generate_number_parts, generate_word_parts, preserve_original, protected_words, protected_words_path, split_on_case_change, split_on_numerics, stem_english_possessive, type_table, type_table_path } | { type, minimum_length } | { type, use_romaji } | { type, stoptags } | { type, alternate, case_first, case_level, country, decomposition, hiragana_quaternary_mode, language, numeric, rules, strength, variable_top, variant } | { type, unicode_set_filter } | { type, name } | { type, dir, id } | { type, encoder, languageset, max_code_len, name_type, replace, rule_type } | { type }[])*: Array of token filters used to apply after the tokenizer. +** *`normalizer` (Optional, string)*: Normalizer to use to convert text into a single token. +** *`text` (Optional, string | string[])*: Text to analyze. +If an array of strings is provided, it is analyzed as a multi-value field. 
+** *`tokenizer` (Optional, string | { type, tokenize_on_chars, max_token_length } | { type, max_token_length } | { type, custom_token_chars, max_gram, min_gram, token_chars } | { type, buffer_size } | { type } | { type } | { type, custom_token_chars, max_gram, min_gram, token_chars } | { type, buffer_size, delimiter, replacement, reverse, skip } | { type, flags, group, pattern } | { type, pattern } | { type, pattern } | { type, max_token_length } | { type } | { type, max_token_length } | { type, max_token_length } | { type, rule_files } | { type, discard_punctuation, mode, nbest_cost, nbest_examples, user_dictionary, user_dictionary_rules, discard_compound_token } | { type, decompound_mode, discard_punctuation, user_dictionary, user_dictionary_rules })*: Tokenizer to use to convert text into tokens. + +[discrete] +==== cancel_migrate_reindex +Cancel a migration reindex operation. + +Cancel a migration reindex attempt for a data stream or index. +[source,ts] +---- +client.indices.cancelMigrateReindex({ index }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`index` (string | string[])*: The index or data stream name + +[discrete] +==== clear_cache +Clear the cache. +Clear the cache of one or more indices. +For data streams, the API clears the caches of the stream's backing indices. + +By default, the clear cache API clears all caches. +To clear only specific caches, use the `fielddata`, `query`, or `request` parameters. +To clear the cache only of specific fields, use the `fields` parameter. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-clear-cache[Endpoint documentation] +[source,ts] +---- +client.indices.clearCache({ ... }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`index` (Optional, string | string[])*: List of data streams, indices, and aliases used to limit the request. +Supports wildcards (`*`). +To target all data streams and indices, omit this parameter or use `*` or `_all`. +** *`allow_no_indices` (Optional, boolean)*: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. +This behavior applies even if the request targets other open indices. +** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of index that wildcard patterns can match. +If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. +Supports a list of values, such as `open,hidden`. +Valid values are: `all`, `open`, `closed`, `hidden`, `none`. +** *`fielddata` (Optional, boolean)*: If `true`, clears the fields cache. +Use the `fields` parameter to clear the cache of specific fields only. +** *`fields` (Optional, string | string[])*: List of field names used to limit the `fielddata` parameter. +** *`ignore_unavailable` (Optional, boolean)*: If `false`, the request returns an error if it targets a missing or closed index. +** *`query` (Optional, boolean)*: If `true`, clears the query cache. +** *`request` (Optional, boolean)*: If `true`, clears the request cache. + +[discrete] +==== clone +Clone an index. +Clone an existing index into a new index. +Each original primary shard is cloned into a new primary shard in the new index. + +IMPORTANT: Elasticsearch does not apply index templates to the resulting index. +The API also does not copy index metadata from the original index. 
+Index metadata includes aliases, index lifecycle management phase definitions, and cross-cluster replication (CCR) follower information. +For example, if you clone a CCR follower index, the resulting clone will not be a follower index. + +The clone API copies most index settings from the source index to the resulting index, with the exception of `index.number_of_replicas` and `index.auto_expand_replicas`. +To set the number of replicas in the resulting index, configure these settings in the clone request. + +Cloning works as follows: + +* First, it creates a new target index with the same definition as the source index. +* Then it hard-links segments from the source index into the target index. If the file system does not support hard-linking, all segments are copied into the new index, which is a much more time consuming process. +* Finally, it recovers the target index as though it were a closed index which had just been re-opened. + +IMPORTANT: Indices can only be cloned if they meet the following requirements: + +* The index must be marked as read-only and have a cluster health status of green. +* The target index must not exist. +* The source index must have the same number of primary shards as the target index. +* The node handling the clone process must have sufficient free disk space to accommodate a second copy of the existing index. + +The current write index on a data stream cannot be cloned. +In order to clone the current write index, the data stream must first be rolled over so that a new write index is created and then the previous write index can be cloned. + +NOTE: Mappings cannot be specified in the `_clone` request. The mappings of the source index will be used for the target index. + +**Monitor the cloning process** + +The cloning process can be monitored with the cat recovery API or the cluster health API can be used to wait until all primary shards have been allocated by setting the `wait_for_status` parameter to `yellow`. + +The `_clone` API returns as soon as the target index has been added to the cluster state, before any shards have been allocated. +At this point, all shards are in the state unassigned. +If, for any reason, the target index can't be allocated, its primary shard will remain unassigned until it can be allocated on that node. + +Once the primary shard is allocated, it moves to state initializing, and the clone process begins. +When the clone operation completes, the shard will become active. +At that point, Elasticsearch will try to allocate any replicas and may decide to relocate the primary shard to another node. + +**Wait for active shards** + +Because the clone operation creates a new index to clone the shards to, the wait for active shards setting on index creation applies to the clone index action as well. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-clone[Endpoint documentation] +[source,ts] +---- +client.indices.clone({ index, target }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`index` (string)*: Name of the source index to clone. +** *`target` (string)*: Name of the target index to create. +** *`aliases` (Optional, Record)*: Aliases for the resulting index. +** *`settings` (Optional, Record)*: Configuration options for the target index. +** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. 
+** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. +If no response is received before the timeout expires, the request fails and returns an error. +** *`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))*: The number of shard copies that must be active before proceeding with the operation. +Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). + +[discrete] +==== close +Close an index. +A closed index is blocked for read or write operations and does not allow all operations that opened indices allow. +It is not possible to index documents or to search for documents in a closed index. +Closed indices do not have to maintain internal data structures for indexing or searching documents, which results in a smaller overhead on the cluster. + +When opening or closing an index, the master node is responsible for restarting the index shards to reflect the new state of the index. +The shards will then go through the normal recovery process. +The data of opened and closed indices is automatically replicated by the cluster to ensure that enough shard copies are safely kept around at all times. + +You can open and close multiple indices. +An error is thrown if the request explicitly refers to a missing index. +This behaviour can be turned off using the `ignore_unavailable=true` parameter. + +By default, you must explicitly name the indices you are opening or closing. +To open or close indices with `_all`, `*`, or other wildcard expressions, change the` action.destructive_requires_name` setting to `false`. This setting can also be changed with the cluster update settings API. + +Closed indices consume a significant amount of disk-space which can cause problems in managed environments. +Closing indices can be turned off with the cluster settings API by setting `cluster.indices.close.enable` to `false`. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-close[Endpoint documentation] +[source,ts] +---- +client.indices.close({ index }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`index` (string | string[])*: List or wildcard expression of index names used to limit the request. +** *`allow_no_indices` (Optional, boolean)*: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. +This behavior applies even if the request targets other open indices. +** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of index that wildcard patterns can match. +If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. +Supports a list of values, such as `open,hidden`. +Valid values are: `all`, `open`, `closed`, `hidden`, `none`. +** *`ignore_unavailable` (Optional, boolean)*: If `false`, the request returns an error if it targets a missing or closed index. +** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. +** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. +If no response is received before the timeout expires, the request fails and returns an error. 
+** *`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))*: The number of shard copies that must be active before proceeding with the operation. +Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). + +[discrete] +==== create +Create an index. +You can use the create index API to add a new index to an Elasticsearch cluster. +When creating an index, you can specify the following: + +* Settings for the index. +* Mappings for fields in the index. +* Index aliases + +**Wait for active shards** + +By default, index creation will only return a response to the client when the primary copies of each shard have been started, or the request times out. +The index creation response will indicate what happened. +For example, `acknowledged` indicates whether the index was successfully created in the cluster, `while shards_acknowledged` indicates whether the requisite number of shard copies were started for each shard in the index before timing out. +Note that it is still possible for either `acknowledged` or `shards_acknowledged` to be `false`, but for the index creation to be successful. +These values simply indicate whether the operation completed before the timeout. +If `acknowledged` is false, the request timed out before the cluster state was updated with the newly created index, but it probably will be created sometime soon. +If `shards_acknowledged` is false, then the request timed out before the requisite number of shards were started (by default just the primaries), even if the cluster state was successfully updated to reflect the newly created index (that is to say, `acknowledged` is `true`). + +You can change the default of only waiting for the primary shards to start through the index setting `index.write.wait_for_active_shards`. +Note that changing this setting will also affect the `wait_for_active_shards` value on all subsequent write operations. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-create[Endpoint documentation] +[source,ts] +---- +client.indices.create({ index }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`index` (string)*: Name of the index you wish to create. +** *`aliases` (Optional, Record)*: Aliases for the index. +** *`mappings` (Optional, { all_field, date_detection, dynamic, dynamic_date_formats, dynamic_templates, _field_names, index_field, _meta, numeric_detection, properties, _routing, _size, _source, runtime, enabled, subobjects, _data_stream_timestamp })*: Mapping for fields in the index. 
If specified, this mapping can include: +- Field names +- Field data types +- Mapping parameters +** *`settings` (Optional, { index, mode, routing_path, soft_deletes, sort, number_of_shards, number_of_replicas, number_of_routing_shards, check_on_startup, codec, routing_partition_size, load_fixed_bitset_filters_eagerly, hidden, auto_expand_replicas, merge, search, refresh_interval, max_result_window, max_inner_result_window, max_rescore_window, max_docvalue_fields_search, max_script_fields, max_ngram_diff, max_shingle_diff, blocks, max_refresh_listeners, analyze, highlight, max_terms_count, max_regex_length, routing, gc_deletes, default_pipeline, final_pipeline, lifecycle, provided_name, creation_date, creation_date_string, uuid, version, verified_before_close, format, max_slices_per_scroll, translog, query_string, priority, top_metrics_max_size, analysis, settings, time_series, queries, similarity, mapping, indexing.slowlog, indexing_pressure, store })*: Configuration options for the index. +** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. +** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. +If no response is received before the timeout expires, the request fails and returns an error. +** *`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))*: The number of shard copies that must be active before proceeding with the operation. +Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). + +[discrete] +==== create_data_stream +Create a data stream. + +You must have a matching index template with data stream enabled. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-create-data-stream[Endpoint documentation] +[source,ts] +---- +client.indices.createDataStream({ name }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`name` (string)*: Name of the data stream, which must meet the following criteria: +Lowercase only; +Cannot include `\`, `/`, `*`, `?`, `"`, `<`, `>`, `|`, `,`, `#`, `:`, or a space character; +Cannot start with `-`, `_`, `+`, or `.ds-`; +Cannot be `.` or `..`; +Cannot be longer than 255 bytes. Multi-byte characters count towards this limit faster. +** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. +** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. + +[discrete] +==== create_from +Create an index from a source index. + +Copy the mappings and settings from the source index to a destination index while allowing request settings and mappings to override the source values. +[source,ts] +---- +client.indices.createFrom({ source, dest }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`source` (string)*: The source index or data stream name +** *`dest` (string)*: The destination index or data stream name +** *`create_from` (Optional, { mappings_override, settings_override, remove_index_blocks })* + +[discrete] +==== data_streams_stats +Get data stream stats. + +Get statistics for one or more data streams. 
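+
+For example, a minimal sketch of the call documented below (the data stream name is only an illustration):
+
+[source,ts]
+----
+// Retrieve stats for a single data stream; omit `name` to target all data streams
+const stats = await client.indices.dataStreamsStats({
+  name: 'my-data-stream'
+})
+console.log(stats)
+----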
+ +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-data-streams-stats-1[Endpoint documentation] +[source,ts] +---- +client.indices.dataStreamsStats({ ... }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`name` (Optional, string)*: List of data streams used to limit the request. +Wildcard expressions (`*`) are supported. +To target all data streams in a cluster, omit this parameter or use `*`. +** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of data stream that wildcard patterns can match. +Supports a list of values, such as `open,hidden`. + +[discrete] +==== delete +Delete indices. +Deleting an index deletes its documents, shards, and metadata. +It does not delete related Kibana components, such as data views, visualizations, or dashboards. + +You cannot delete the current write index of a data stream. +To delete the index, you must roll over the data stream so a new write index is created. +You can then use the delete index API to delete the previous write index. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-delete[Endpoint documentation] +[source,ts] +---- +client.indices.delete({ index }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`index` (string | string[])*: List of indices to delete. +You cannot specify index aliases. +By default, this parameter does not support wildcards (`*`) or `_all`. +To use wildcards or `_all`, set the `action.destructive_requires_name` cluster setting to `false`. +** *`allow_no_indices` (Optional, boolean)*: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. +This behavior applies even if the request targets other open indices. +** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of index that wildcard patterns can match. +If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. +Supports a list of values, such as `open,hidden`. +Valid values are: `all`, `open`, `closed`, `hidden`, `none`. +** *`ignore_unavailable` (Optional, boolean)*: If `false`, the request returns an error if it targets a missing or closed index. +** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. +** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. +If no response is received before the timeout expires, the request fails and returns an error. + +[discrete] +==== delete_alias +Delete an alias. +Removes a data stream or index from an alias. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-delete-alias[Endpoint documentation] +[source,ts] +---- +client.indices.deleteAlias({ index, name }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`index` (string | string[])*: List of data streams or indices used to limit the request. +Supports wildcards (`*`). +** *`name` (string | string[])*: List of aliases to remove. +Supports wildcards (`*`). To remove all aliases, use `*` or `_all`. +** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. 
+If no response is received before the timeout expires, the request fails and returns an error.
+** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response.
+If no response is received before the timeout expires, the request fails and returns an error.
+
+[discrete]
+==== delete_data_lifecycle
+Delete data stream lifecycles.
+Removes the data stream lifecycle from a data stream, rendering it not managed by the data stream lifecycle.
+
+https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-delete-data-lifecycle[Endpoint documentation]
+[source,ts]
+----
+client.indices.deleteDataLifecycle({ name })
+----
+
+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`name` (string | string[])*: A list of data streams from which the data stream lifecycle will be deleted; use `*` to target all data streams
+** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Whether wildcard expressions should get expanded to open or closed indices (default: open)
+** *`master_timeout` (Optional, string | -1 | 0)*: Specify timeout for connection to master
+** *`timeout` (Optional, string | -1 | 0)*: Explicit timeout for the request
+
+[discrete]
+==== delete_data_stream
+Delete data streams.
+Deletes one or more data streams and their backing indices.
+
+https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-delete-data-stream[Endpoint documentation]
+[source,ts]
+----
+client.indices.deleteDataStream({ name })
+----
+
+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`name` (string | string[])*: List of data streams to delete. Wildcard (`*`) expressions are supported.
+** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.
+** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of data stream that wildcard patterns can match. Supports a list of values, such as `open,hidden`.
+
+[discrete]
+==== delete_index_template
+Delete an index template.
+The provided name may contain multiple template names separated by a comma. If multiple template
+names are specified then there is no wildcard support and the provided names should match completely with
+existing templates.
+
+https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-delete-index-template[Endpoint documentation]
+[source,ts]
+----
+client.indices.deleteIndexTemplate({ name })
+----
+
+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`name` (string | string[])*: List of index template names used to limit the request. Wildcard (*) expressions are supported.
+** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.
+** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.
+
+[discrete]
+==== delete_template
+Delete a legacy index template.
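+
+For example, a minimal sketch of the call documented below (the template name is only an illustration):
+
+[source,ts]
+----
+// Delete a legacy index template by name
+const response = await client.indices.deleteTemplate({
+  name: 'my-legacy-template'
+})
+console.log(response)
+----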
+ +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-delete-template[Endpoint documentation] +[source,ts] +---- +client.indices.deleteTemplate({ name }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`name` (string)*: The name of the legacy index template to delete. +Wildcard (`*`) expressions are supported. +** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. +** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. +If no response is received before the timeout expires, the request fails and returns an error. + +[discrete] +==== disk_usage +Analyze the index disk usage. +Analyze the disk usage of each field of an index or data stream. +This API might not support indices created in previous Elasticsearch versions. +The result of a small index can be inaccurate as some parts of an index might not be analyzed by the API. + +NOTE: The total size of fields of the analyzed shards of the index in the response is usually smaller than the index `store_size` value because some small metadata files are ignored and some parts of data files might not be scanned by the API. +Since stored fields are stored together in a compressed format, the sizes of stored fields are also estimates and can be inaccurate. +The stored size of the `_id` field is likely underestimated while the `_source` field is overestimated. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-disk-usage[Endpoint documentation] +[source,ts] +---- +client.indices.diskUsage({ index }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`index` (string | string[])*: List of data streams, indices, and aliases used to limit the request. +It’s recommended to execute this API with a single index (or the latest backing index of a data stream) as the API consumes resources significantly. +** *`allow_no_indices` (Optional, boolean)*: If false, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. +This behavior applies even if the request targets other open indices. +For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. +** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of index that wildcard patterns can match. +If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. +Supports a list of values, such as `open,hidden`. +** *`flush` (Optional, boolean)*: If `true`, the API performs a flush before analysis. +If `false`, the response may not include uncommitted data. +** *`ignore_unavailable` (Optional, boolean)*: If `true`, missing or closed indices are not included in the response. +** *`run_expensive_tasks` (Optional, boolean)*: Analyzing field disk usage is resource-intensive. +To use the API, this parameter must be set to `true`. + +[discrete] +==== downsample +Downsample an index. +Aggregate a time series (TSDS) index and store pre-computed statistical summaries (`min`, `max`, `sum`, `value_count` and `avg`) for each metric field grouped by a configured time interval. +For example, a TSDS index that contains metrics sampled every 10 seconds can be downsampled to an hourly index. 
+All documents within an hour interval are summarized and stored as a single document in the downsample index. + +NOTE: Only indices in a time series data stream are supported. +Neither field nor document level security can be defined on the source index. +The source index must be read only (`index.blocks.write: true`). + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-downsample[Endpoint documentation] +[source,ts] +---- +client.indices.downsample({ index, target_index }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`index` (string)*: Name of the time series index to downsample. +** *`target_index` (string)*: Name of the index to create. +** *`config` (Optional, { fixed_interval })* + +[discrete] +==== exists +Check indices. +Check if one or more indices, index aliases, or data streams exist. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-exists[Endpoint documentation] +[source,ts] +---- +client.indices.exists({ index }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`index` (string | string[])*: List of data streams, indices, and aliases. Supports wildcards (`*`). +** *`allow_no_indices` (Optional, boolean)*: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. +This behavior applies even if the request targets other open indices. +** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of index that wildcard patterns can match. +If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. +Supports a list of values, such as `open,hidden`. +Valid values are: `all`, `open`, `closed`, `hidden`, `none`. +** *`flat_settings` (Optional, boolean)*: If `true`, returns settings in flat format. +** *`ignore_unavailable` (Optional, boolean)*: If `false`, the request returns an error if it targets a missing or closed index. +** *`include_defaults` (Optional, boolean)*: If `true`, return all default settings in the response. +** *`local` (Optional, boolean)*: If `true`, the request retrieves information from the local node only. + +[discrete] +==== exists_alias +Check aliases. + +Check if one or more data stream or index aliases exist. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-exists-alias[Endpoint documentation] +[source,ts] +---- +client.indices.existsAlias({ name }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`name` (string | string[])*: List of aliases to check. Supports wildcards (`*`). +** *`index` (Optional, string | string[])*: List of data streams or indices used to limit the request. Supports wildcards (`*`). +To target all data streams and indices, omit this parameter or use `*` or `_all`. +** *`allow_no_indices` (Optional, boolean)*: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. +This behavior applies even if the request targets other open indices. +** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of index that wildcard patterns can match. +If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. 
+Supports a list of values, such as `open,hidden`. +Valid values are: `all`, `open`, `closed`, `hidden`, `none`. +** *`ignore_unavailable` (Optional, boolean)*: If `false`, requests that include a missing data stream or index in the target indices or data streams return an error. +** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. + +[discrete] +==== exists_index_template +Check index templates. + +Check whether index templates exist. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-exists-index-template[Endpoint documentation] +[source,ts] +---- +client.indices.existsIndexTemplate({ name }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`name` (string)*: List of index template names used to limit the request. Wildcard (*) expressions are supported. +** *`local` (Optional, boolean)*: If true, the request retrieves information from the local node only. Defaults to false, which means information is retrieved from the master node. +** *`flat_settings` (Optional, boolean)*: If true, returns settings in flat format. +** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. + +[discrete] +==== exists_template +Check existence of index templates. +Get information about whether index templates exist. +Index templates define settings, mappings, and aliases that can be applied automatically to new indices. + +IMPORTANT: This documentation is about legacy index templates, which are deprecated and will be replaced by the composable templates introduced in Elasticsearch 7.8. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-exists-template[Endpoint documentation] +[source,ts] +---- +client.indices.existsTemplate({ name }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`name` (string | string[])*: A list of index template names used to limit the request. +Wildcard (`*`) expressions are supported. +** *`flat_settings` (Optional, boolean)*: Indicates whether to use a flat format for the response. +** *`local` (Optional, boolean)*: Indicates whether to get information from the local node only. +** *`master_timeout` (Optional, string | -1 | 0)*: The period to wait for the master node. +If the master node is not available before the timeout expires, the request fails and returns an error. +To indicate that the request should never timeout, set it to `-1`. + +[discrete] +==== explain_data_lifecycle +Get the status for a data stream lifecycle. +Get information about an index or data stream's current data stream lifecycle status, such as time since index creation, time since rollover, the lifecycle configuration managing the index, or any errors encountered during lifecycle execution. 
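+
+For example, a minimal sketch of the call documented below (the backing index name is only an illustration):
+
+[source,ts]
+----
+// Check the data stream lifecycle status of a backing index,
+// including the default values the system applies
+const explanation = await client.indices.explainDataLifecycle({
+  index: '.ds-my-data-stream-2099.03.07-000001',
+  include_defaults: true
+})
+console.log(explanation)
+----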
+ +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-explain-data-lifecycle[Endpoint documentation] +[source,ts] +---- +client.indices.explainDataLifecycle({ index }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`index` (string | string[])*: The name of the index to explain +** *`include_defaults` (Optional, boolean)*: indicates if the API should return the default values the system uses for the index's lifecycle +** *`master_timeout` (Optional, string | -1 | 0)*: Specify timeout for connection to master + +[discrete] +==== field_usage_stats +Get field usage stats. +Get field usage information for each shard and field of an index. +Field usage statistics are automatically captured when queries are running on a cluster. +A shard-level search request that accesses a given field, even if multiple times during that request, is counted as a single use. + +The response body reports the per-shard usage count of the data structures that back the fields in the index. +A given request will increment each count by a maximum value of 1, even if the request accesses the same field multiple times. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-field-usage-stats[Endpoint documentation] +[source,ts] +---- +client.indices.fieldUsageStats({ index }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`index` (string | string[])*: List or wildcard expression of index names used to limit the request. +** *`allow_no_indices` (Optional, boolean)*: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. +This behavior applies even if the request targets other open indices. +For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. +** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of index that wildcard patterns can match. +If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. +Supports a list of values, such as `open,hidden`. +** *`ignore_unavailable` (Optional, boolean)*: If `true`, missing or closed indices are not included in the response. +** *`fields` (Optional, string | string[])*: List or wildcard expressions of fields to include in the statistics. + +[discrete] +==== flush +Flush data streams or indices. +Flushing a data stream or index is the process of making sure that any data that is currently only stored in the transaction log is also permanently stored in the Lucene index. +When restarting, Elasticsearch replays any unflushed operations from the transaction log into the Lucene index to bring it back into the state that it was in before the restart. +Elasticsearch automatically triggers flushes as needed, using heuristics that trade off the size of the unflushed transaction log against the cost of performing each flush. + +After each operation has been flushed it is permanently stored in the Lucene index. +This may mean that there is no need to maintain an additional copy of it in the transaction log. +The transaction log is made up of multiple files, called generations, and Elasticsearch will delete any generation files when they are no longer needed, freeing up disk space. 
+ +It is also possible to trigger a flush on one or more indices using the flush API, although it is rare for users to need to call this API directly. +If you call the flush API after indexing some documents then a successful response indicates that Elasticsearch has flushed all the documents that were indexed before the flush API was called. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-flush[Endpoint documentation] +[source,ts] +---- +client.indices.flush({ ... }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`index` (Optional, string | string[])*: List of data streams, indices, and aliases to flush. +Supports wildcards (`*`). +To flush all data streams and indices, omit this parameter or use `*` or `_all`. +** *`allow_no_indices` (Optional, boolean)*: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. +This behavior applies even if the request targets other open indices. +** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of index that wildcard patterns can match. +If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. +Supports a list of values, such as `open,hidden`. +Valid values are: `all`, `open`, `closed`, `hidden`, `none`. +** *`force` (Optional, boolean)*: If `true`, the request forces a flush even if there are no changes to commit to the index. +** *`ignore_unavailable` (Optional, boolean)*: If `false`, the request returns an error if it targets a missing or closed index. +** *`wait_if_ongoing` (Optional, boolean)*: If `true`, the flush operation blocks until execution when another flush operation is running. +If `false`, Elasticsearch returns an error if you request a flush when another flush operation is running. + +[discrete] +==== forcemerge +Force a merge. +Perform the force merge operation on the shards of one or more indices. +For data streams, the API forces a merge on the shards of the stream's backing indices. + +Merging reduces the number of segments in each shard by merging some of them together and also frees up the space used by deleted documents. +Merging normally happens automatically, but sometimes it is useful to trigger a merge manually. + +WARNING: We recommend force merging only a read-only index (meaning the index is no longer receiving writes). +When documents are updated or deleted, the old version is not immediately removed but instead soft-deleted and marked with a "tombstone". +These soft-deleted documents are automatically cleaned up during regular segment merges. +But force merge can cause very large (greater than 5 GB) segments to be produced, which are not eligible for regular merges. +So the number of soft-deleted documents can then grow rapidly, resulting in higher disk usage and worse search performance. +If you regularly force merge an index receiving writes, this can also make snapshots more expensive, since the new documents can't be backed up incrementally. + +**Blocks during a force merge** + +Calls to this API block until the merge is complete (unless request contains `wait_for_completion=false`). +If the client connection is lost before completion then the force merge process will continue in the background. +Any new requests to force merge the same indices will also block until the ongoing force merge is complete. 
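+
+For example, a blocking force merge of a read-only index down to a single segment might look like the following sketch (the index name is illustrative):
+
+[source,ts]
+----
+// Blocks until the merge completes unless wait_for_completion is set to false
+await client.indices.forcemerge({
+  index: 'my-old-index',
+  max_num_segments: 1
+})
+----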
+
+**Running force merge asynchronously**
+
+If the request contains `wait_for_completion=false`, Elasticsearch performs some preflight checks, launches the request, and returns a task you can use to get the status of the task.
+However, you cannot cancel this task as the force merge task is not cancelable.
+Elasticsearch creates a record of this task as a document at `_tasks/`.
+When you are done with a task, you should delete the task document so Elasticsearch can reclaim the space.
+
+**Force merging multiple indices**
+
+You can force merge multiple indices with a single request by targeting:
+
+* One or more data streams that contain multiple backing indices
+* Multiple indices
+* One or more aliases
+* All data streams and indices in a cluster
+
+Each targeted shard is force-merged separately using the `force_merge` threadpool.
+By default, each node only has a single `force_merge` thread, which means that the shards on that node are force-merged one at a time.
+If you expand the `force_merge` threadpool on a node, then it will force merge its shards in parallel.
+
+Force merge makes the storage for the shard being merged temporarily increase, as it may require free space up to triple its size in case the `max_num_segments` parameter is set to `1`, to rewrite all segments into a new one.
+
+**Data streams and time-based indices**
+
+Force-merging is useful for managing a data stream's older backing indices and other time-based indices, particularly after a rollover.
+In these cases, each index only receives indexing traffic for a certain period of time.
+Once an index receives no more writes, its shards can be force-merged to a single segment.
+This can be a good idea because single-segment shards can sometimes use simpler and more efficient data structures to perform searches.
+For example:
+
+----
+POST /.ds-my-data-stream-2099.03.07-000001/_forcemerge?max_num_segments=1
+----
+
+https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-forcemerge[Endpoint documentation]
+[source,ts]
+----
+client.indices.forcemerge({ ... })
+----
+
+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`index` (Optional, string | string[])*: A list of index names; use `_all` or empty string to perform the operation on all indices
+** *`allow_no_indices` (Optional, boolean)*: Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified)
+** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Whether to expand wildcard expression to concrete indices that are open, closed or both.
+** *`flush` (Optional, boolean)*: Specify whether the index should be flushed after performing the operation (default: true)
+** *`ignore_unavailable` (Optional, boolean)*: Whether specified concrete indices should be ignored when unavailable (missing or closed)
+** *`max_num_segments` (Optional, number)*: The number of segments the index should be merged into (default: dynamic)
+** *`only_expunge_deletes` (Optional, boolean)*: Specify whether the operation should only expunge deleted documents
+** *`wait_for_completion` (Optional, boolean)*: Should the request wait until the force merge is completed.
+
+[discrete]
+==== get
+Get index information.
+Get information about one or more indices. For data streams, the API returns information about the
+stream’s backing indices.
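+
+For example, a minimal usage sketch, assuming a configured `client` instance (the index pattern is illustrative):
+
+[source,ts]
+----
+// Fetch settings, mappings, and aliases for indices matching a pattern
+const info = await client.indices.get({ index: 'my-index-*' })
+console.log(Object.keys(info))
+----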
+ +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get[Endpoint documentation] +[source,ts] +---- +client.indices.get({ index }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`index` (string | string[])*: List of data streams, indices, and index aliases used to limit the request. +Wildcard expressions (*) are supported. +** *`allow_no_indices` (Optional, boolean)*: If false, the request returns an error if any wildcard expression, index alias, or _all value targets only +missing or closed indices. This behavior applies even if the request targets other open indices. For example, +a request targeting foo*,bar* returns an error if an index starts with foo but no index starts with bar. +** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of index that wildcard expressions can match. If the request can target data streams, this argument +determines whether wildcard expressions match hidden data streams. Supports a list of values, +such as open,hidden. +** *`flat_settings` (Optional, boolean)*: If true, returns settings in flat format. +** *`ignore_unavailable` (Optional, boolean)*: If false, requests that target a missing index return an error. +** *`include_defaults` (Optional, boolean)*: If true, return all default settings in the response. +** *`local` (Optional, boolean)*: If true, the request retrieves information from the local node only. Defaults to false, which means information is retrieved from the master node. +** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. +** *`features` (Optional, { name, description } | { name, description }[])*: Return only information on specified index features + +[discrete] +==== get_alias +Get aliases. +Retrieves information for one or more data stream or index aliases. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-alias[Endpoint documentation] +[source,ts] +---- +client.indices.getAlias({ ... }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`name` (Optional, string | string[])*: List of aliases to retrieve. +Supports wildcards (`*`). +To retrieve all aliases, omit this parameter or use `*` or `_all`. +** *`index` (Optional, string | string[])*: List of data streams or indices used to limit the request. +Supports wildcards (`*`). +To target all data streams and indices, omit this parameter or use `*` or `_all`. +** *`allow_no_indices` (Optional, boolean)*: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. +This behavior applies even if the request targets other open indices. +** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of index that wildcard patterns can match. +If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. +Supports a list of values, such as `open,hidden`. +Valid values are: `all`, `open`, `closed`, `hidden`, `none`. +** *`ignore_unavailable` (Optional, boolean)*: If `false`, the request returns an error if it targets a missing or closed index. 
+** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. + +[discrete] +==== get_data_lifecycle +Get data stream lifecycles. + +Get the data stream lifecycle configuration of one or more data streams. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-data-lifecycle[Endpoint documentation] +[source,ts] +---- +client.indices.getDataLifecycle({ name }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`name` (string | string[])*: List of data streams to limit the request. +Supports wildcards (`*`). +To target all data streams, omit this parameter or use `*` or `_all`. +** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of data stream that wildcard patterns can match. +Supports a list of values, such as `open,hidden`. +Valid values are: `all`, `open`, `closed`, `hidden`, `none`. +** *`include_defaults` (Optional, boolean)*: If `true`, return all default settings in the response. +** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. + +[discrete] +==== get_data_lifecycle_stats +Get data stream lifecycle stats. +Get statistics about the data streams that are managed by a data stream lifecycle. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-data-lifecycle-stats[Endpoint documentation] +[source,ts] +---- +client.indices.getDataLifecycleStats() +---- + + +[discrete] +==== get_data_stream +Get data streams. + +Get information about one or more data streams. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-data-stream[Endpoint documentation] +[source,ts] +---- +client.indices.getDataStream({ ... }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`name` (Optional, string | string[])*: List of data stream names used to limit the request. +Wildcard (`*`) expressions are supported. If omitted, all data streams are returned. +** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of data stream that wildcard patterns can match. +Supports a list of values, such as `open,hidden`. +** *`include_defaults` (Optional, boolean)*: If true, returns all relevant default configurations for the index template. +** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. +** *`verbose` (Optional, boolean)*: Whether the maximum timestamp for each data stream should be calculated and returned. + +[discrete] +==== get_field_mapping +Get mapping definitions. +Retrieves mapping definitions for one or more fields. +For data streams, the API retrieves field mappings for the stream’s backing indices. + +This API is useful if you don't need a complete mapping or if an index mapping contains a large number of fields. 
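+
+For example, a minimal sketch that retrieves mappings for selected fields only (the index and field names are illustrative):
+
+[source,ts]
+----
+// Retrieve mapping definitions for specific fields instead of the full mapping
+const mappings = await client.indices.getFieldMapping({
+  index: 'my-index',
+  fields: ['user.id', '*.keyword']
+})
+----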
+ +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-mapping[Endpoint documentation] +[source,ts] +---- +client.indices.getFieldMapping({ fields }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`fields` (string | string[])*: List or wildcard expression of fields used to limit returned information. +Supports wildcards (`*`). +** *`index` (Optional, string | string[])*: List of data streams, indices, and aliases used to limit the request. +Supports wildcards (`*`). +To target all data streams and indices, omit this parameter or use `*` or `_all`. +** *`allow_no_indices` (Optional, boolean)*: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. +This behavior applies even if the request targets other open indices. +** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of index that wildcard patterns can match. +If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. +Supports a list of values, such as `open,hidden`. +Valid values are: `all`, `open`, `closed`, `hidden`, `none`. +** *`ignore_unavailable` (Optional, boolean)*: If `false`, the request returns an error if it targets a missing or closed index. +** *`include_defaults` (Optional, boolean)*: If `true`, return all default settings in the response. +** *`local` (Optional, boolean)*: If `true`, the request retrieves information from the local node only. + +[discrete] +==== get_index_template +Get index templates. +Get information about one or more index templates. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-index-template[Endpoint documentation] +[source,ts] +---- +client.indices.getIndexTemplate({ ... }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`name` (Optional, string)*: List of index template names used to limit the request. Wildcard (*) expressions are supported. +** *`local` (Optional, boolean)*: If true, the request retrieves information from the local node only. Defaults to false, which means information is retrieved from the master node. +** *`flat_settings` (Optional, boolean)*: If true, returns settings in flat format. +** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. +** *`include_defaults` (Optional, boolean)*: If true, returns all relevant default configurations for the index template. + +[discrete] +==== get_mapping +Get mapping definitions. +For data streams, the API retrieves mappings for the stream’s backing indices. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-mapping[Endpoint documentation] +[source,ts] +---- +client.indices.getMapping({ ... }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`index` (Optional, string | string[])*: List of data streams, indices, and aliases used to limit the request. +Supports wildcards (`*`). +To target all data streams and indices, omit this parameter or use `*` or `_all`. +** *`allow_no_indices` (Optional, boolean)*: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. +This behavior applies even if the request targets other open indices. 
+** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of index that wildcard patterns can match. +If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. +Supports a list of values, such as `open,hidden`. +Valid values are: `all`, `open`, `closed`, `hidden`, `none`. +** *`ignore_unavailable` (Optional, boolean)*: If `false`, the request returns an error if it targets a missing or closed index. +** *`local` (Optional, boolean)*: If `true`, the request retrieves information from the local node only. +** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. + +[discrete] +==== get_migrate_reindex_status +Get the migration reindexing status. + +Get the status of a migration reindex attempt for a data stream or index. +[source,ts] +---- +client.indices.getMigrateReindexStatus({ index }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`index` (string | string[])*: The index or data stream name. + +[discrete] +==== get_settings +Get index settings. +Get setting information for one or more indices. +For data streams, it returns setting information for the stream's backing indices. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-settings[Endpoint documentation] +[source,ts] +---- +client.indices.getSettings({ ... }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`index` (Optional, string | string[])*: List of data streams, indices, and aliases used to limit +the request. Supports wildcards (`*`). To target all data streams and +indices, omit this parameter or use `*` or `_all`. +** *`name` (Optional, string | string[])*: List or wildcard expression of settings to retrieve. +** *`allow_no_indices` (Optional, boolean)*: If `false`, the request returns an error if any wildcard expression, index +alias, or `_all` value targets only missing or closed indices. This +behavior applies even if the request targets other open indices. For +example, a request targeting `foo*,bar*` returns an error if an index +starts with foo but no index starts with `bar`. +** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of index that wildcard patterns can match. +If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. +Supports a list of values, such as `open,hidden`. +** *`flat_settings` (Optional, boolean)*: If `true`, returns settings in flat format. +** *`ignore_unavailable` (Optional, boolean)*: If `false`, the request returns an error if it targets a missing or closed index. +** *`include_defaults` (Optional, boolean)*: If `true`, return all default settings in the response. +** *`local` (Optional, boolean)*: If `true`, the request retrieves information from the local node only. If +`false`, information is retrieved from the master node. +** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. If no response is +received before the timeout expires, the request fails and returns an +error. + +[discrete] +==== get_template +Get index templates. +Get information about one or more index templates. 
+ +IMPORTANT: This documentation is about legacy index templates, which are deprecated and will be replaced by the composable templates introduced in Elasticsearch 7.8. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-template[Endpoint documentation] +[source,ts] +---- +client.indices.getTemplate({ ... }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`name` (Optional, string | string[])*: List of index template names used to limit the request. +Wildcard (`*`) expressions are supported. +To return all index templates, omit this parameter or use a value of `_all` or `*`. +** *`flat_settings` (Optional, boolean)*: If `true`, returns settings in flat format. +** *`local` (Optional, boolean)*: If `true`, the request retrieves information from the local node only. +** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. + +[discrete] +==== migrate_reindex +Reindex legacy backing indices. + +Reindex all legacy backing indices for a data stream. +This operation occurs in a persistent task. +The persistent task ID is returned immediately and the reindexing work is completed in that task. +[source,ts] +---- +client.indices.migrateReindex({ ... }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`reindex` (Optional, { mode, source })* + +[discrete] +==== migrate_to_data_stream +Convert an index alias to a data stream. +Converts an index alias to a data stream. +You must have a matching index template that is data stream enabled. +The alias must meet the following criteria: +The alias must have a write index; +All indices for the alias must have a `@timestamp` field mapping of a `date` or `date_nanos` field type; +The alias must not have any filters; +The alias must not use custom routing. +If successful, the request removes the alias and creates a data stream with the same name. +The indices for the alias become hidden backing indices for the stream. +The write index for the alias becomes the write index for the stream. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-migrate-to-data-stream[Endpoint documentation] +[source,ts] +---- +client.indices.migrateToDataStream({ name }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`name` (string)*: Name of the index alias to convert to a data stream. +** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. +** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. + +[discrete] +==== modify_data_stream +Update data streams. +Performs one or more data stream modification actions in a single atomic operation. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-modify-data-stream[Endpoint documentation] +[source,ts] +---- +client.indices.modifyDataStream({ actions }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`actions` ({ add_backing_index, remove_backing_index }[])*: Actions to perform. + +[discrete] +==== open +Open a closed index. +For data streams, the API opens any closed backing indices. 
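+
+A minimal usage sketch, assuming a configured `client` instance (the index name is illustrative; detailed behavior notes follow below):
+
+[source,ts]
+----
+// Reopen a previously closed index so it can serve reads and writes again
+await client.indices.open({ index: 'my-closed-index' })
+----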
+
+A closed index is blocked for read/write operations and does not allow all operations that opened indices allow.
+It is not possible to index documents or to search for documents in a closed index.
+This allows closed indices to not have to maintain internal data structures for indexing or searching documents, resulting in a smaller overhead on the cluster.
+
+When opening or closing an index, the master is responsible for restarting the index shards to reflect the new state of the index.
+The shards will then go through the normal recovery process.
+The data of opened or closed indices is automatically replicated by the cluster to ensure that enough shard copies are safely kept around at all times.
+
+You can open and close multiple indices.
+An error is thrown if the request explicitly refers to a missing index.
+This behavior can be turned off by using the `ignore_unavailable=true` parameter.
+
+By default, you must explicitly name the indices you are opening or closing.
+To open or close indices with `_all`, `*`, or other wildcard expressions, change the `action.destructive_requires_name` setting to `false`.
+This setting can also be changed with the cluster update settings API.
+
+Closed indices consume a significant amount of disk space, which can cause problems in managed environments.
+Closing indices can be turned off with the cluster settings API by setting `cluster.indices.close.enable` to `false`.
+
+Because opening or closing an index allocates its shards, the `wait_for_active_shards` setting on index creation applies to the `_open` and `_close` index actions as well.
+
+https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-open[Endpoint documentation]
+[source,ts]
+----
+client.indices.open({ index })
+----
+
+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`index` (string | string[])*: List of data streams, indices, and aliases used to limit the request.
+Supports wildcards (`*`).
+By default, you must explicitly name the indices you are using to limit the request.
+To limit a request using `_all`, `*`, or other wildcard expressions, change the `action.destructive_requires_name` setting to `false`.
+You can update this setting in the `elasticsearch.yml` file or using the cluster update settings API.
+** *`allow_no_indices` (Optional, boolean)*: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices.
+This behavior applies even if the request targets other open indices.
+** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of index that wildcard patterns can match.
+If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams.
+Supports a list of values, such as `open,hidden`.
+Valid values are: `all`, `open`, `closed`, `hidden`, `none`.
+** *`ignore_unavailable` (Optional, boolean)*: If `false`, the request returns an error if it targets a missing or closed index.
+** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node.
+If no response is received before the timeout expires, the request fails and returns an error.
+** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response.
+If no response is received before the timeout expires, the request fails and returns an error.
+** *`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))*: The number of shard copies that must be active before proceeding with the operation. +Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). + +[discrete] +==== promote_data_stream +Promote a data stream. +Promote a data stream from a replicated data stream managed by cross-cluster replication (CCR) to a regular data stream. + +With CCR auto following, a data stream from a remote cluster can be replicated to the local cluster. +These data streams can't be rolled over in the local cluster. +These replicated data streams roll over only if the upstream data stream rolls over. +In the event that the remote cluster is no longer available, the data stream in the local cluster can be promoted to a regular data stream, which allows these data streams to be rolled over in the local cluster. + +NOTE: When promoting a data stream, ensure the local cluster has a data stream enabled index template that matches the data stream. +If this is missing, the data stream will not be able to roll over until a matching index template is created. +This will affect the lifecycle management of the data stream and interfere with the data stream size and retention. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-promote-data-stream[Endpoint documentation] +[source,ts] +---- +client.indices.promoteDataStream({ name }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`name` (string)*: The name of the data stream +** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. + +[discrete] +==== put_alias +Create or update an alias. +Adds a data stream or index to an alias. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-put-alias[Endpoint documentation] +[source,ts] +---- +client.indices.putAlias({ index, name }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`index` (string | string[])*: List of data streams or indices to add. +Supports wildcards (`*`). +Wildcard patterns that match both data streams and indices return an error. +** *`name` (string)*: Alias to update. +If the alias doesn’t exist, the request creates it. +Index alias names support date math. +** *`filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Query used to limit documents the alias can access. +** *`index_routing` (Optional, string)*: Value used to route indexing operations to a specific shard. +If specified, this overwrites the `routing` value for indexing operations. +Data stream aliases don’t support this parameter. 
+** *`is_write_index` (Optional, boolean)*: If `true`, sets the write index or data stream for the alias.
+If an alias points to multiple indices or data streams and `is_write_index` isn’t set, the alias rejects write requests.
+If an index alias points to one index and `is_write_index` isn’t set, the index automatically acts as the write index.
+Data stream aliases don’t automatically set a write data stream, even if the alias points to one data stream.
+** *`routing` (Optional, string)*: Value used to route indexing and search operations to a specific shard.
+Data stream aliases don’t support this parameter.
+** *`search_routing` (Optional, string)*: Value used to route search operations to a specific shard.
+If specified, this overwrites the `routing` value for search operations.
+Data stream aliases don’t support this parameter.
+** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node.
+If no response is received before the timeout expires, the request fails and returns an error.
+** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response.
+If no response is received before the timeout expires, the request fails and returns an error.
+
+[discrete]
+==== put_data_lifecycle
+Update data stream lifecycles.
+Update the data stream lifecycle of the specified data streams.
+
+https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-put-data-lifecycle[Endpoint documentation]
+[source,ts]
+----
+client.indices.putDataLifecycle({ name })
+----
+
+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`name` (string | string[])*: List of data streams used to limit the request.
+Supports wildcards (`*`).
+To target all data streams, use `*` or `_all`.
+** *`data_retention` (Optional, string | -1 | 0)*: If defined, every document added to this data stream will be stored at least for this time frame.
+Any time after this duration the document could be deleted.
+When empty, every document in this data stream will be stored indefinitely.
+** *`downsampling` (Optional, { rounds })*: The downsampling configuration to execute for the managed backing index after rollover.
+** *`enabled` (Optional, boolean)*: If defined, it turns data stream lifecycle on/off (`true`/`false`) for this data stream. A data stream lifecycle
+that's disabled (enabled: `false`) will have no effect on the data stream.
+** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of data stream that wildcard patterns can match.
+Supports a list of values, such as `open,hidden`.
+Valid values are: `all`, `hidden`, `open`, `closed`, `none`.
+** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. If no response is
+received before the timeout expires, the request fails and returns an
+error.
+** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response.
+If no response is received before the timeout expires, the request fails and returns an error.
+
+[discrete]
+==== put_index_template
+Create or update an index template.
+Index templates define settings, mappings, and aliases that can be applied automatically to new indices.
+
+Elasticsearch applies templates to new indices based on a wildcard pattern that matches the index name.
+Index templates are applied during data stream or index creation.
+For data streams, these settings and mappings are applied when the stream's backing indices are created.
+Settings and mappings specified in a create index API request override any settings or mappings specified in an index template.
+Changes to index templates do not affect existing indices, including the existing backing indices of a data stream.
+
+You can use C-style `/* *\/` block comments in index templates.
+You can include comments anywhere in the request body, except before the opening curly bracket.
+
+**Multiple matching templates**
+
+If multiple index templates match the name of a new index or data stream, the template with the highest priority is used.
+
+Multiple templates with overlapping index patterns at the same priority are not allowed and an error will be thrown when attempting to create a template matching an existing index template at identical priorities.
+
+**Composing aliases, mappings, and settings**
+
+When multiple component templates are specified in the `composed_of` field for an index template, they are merged in the order specified, meaning that later component templates override earlier component templates.
+Any mappings, settings, or aliases from the parent index template are merged in next.
+Finally, any configuration on the index request itself is merged.
+Mapping definitions are merged recursively, which means that later mapping components can introduce new field mappings and update the mapping configuration.
+If a field mapping is already contained in an earlier component, its definition will be completely overwritten by the later one.
+This recursive merging strategy applies not only to field mappings, but also root options like `dynamic_templates` and `meta`.
+If an earlier component contains a `dynamic_templates` block, then by default new `dynamic_templates` entries are appended onto the end.
+If an entry already exists with the same key, then it is overwritten by the new definition.
+
+https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-put-index-template[Endpoint documentation]
+[source,ts]
+----
+client.indices.putIndexTemplate({ name })
+----
+
+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`name` (string)*: Index or template name
+** *`index_patterns` (Optional, string | string[])*: Array of wildcard (`*`) expressions used to match the names of data streams and indices during creation.
+** *`composed_of` (Optional, string[])*: An ordered list of component template names.
+Component templates are merged in the order specified, meaning that the last component template specified has the highest precedence.
+** *`template` (Optional, { aliases, mappings, settings, lifecycle })*: Template to be applied.
+It may optionally include an `aliases`, `mappings`, or `settings` configuration.
+** *`data_stream` (Optional, { hidden, allow_custom_routing })*: If this object is included, the template is used to create data streams and their backing indices.
+Supports an empty object.
+Data streams require a matching index template with a `data_stream` object.
+** *`priority` (Optional, number)*: Priority to determine index template precedence when a new data stream or index is created.
+The index template with the highest priority is chosen.
+If no priority is specified, the template is treated as though it is of priority 0 (lowest priority).
+This number is not automatically generated by Elasticsearch.
+** *`version` (Optional, number)*: Version number used to manage index templates externally.
+This number is not automatically generated by Elasticsearch.
+External systems can use these version numbers to simplify template management.
+To unset a version, replace the template without specifying one. +** *`_meta` (Optional, Record)*: Optional user metadata about the index template. +It may have any contents. +It is not automatically generated or used by Elasticsearch. +This user-defined object is stored in the cluster state, so keeping it short is preferable +To unset the metadata, replace the template without specifying it. +** *`allow_auto_create` (Optional, boolean)*: This setting overrides the value of the `action.auto_create_index` cluster setting. +If set to `true` in a template, then indices can be automatically created using that template even if auto-creation of indices is disabled via `actions.auto_create_index`. +If set to `false`, then indices or data streams matching the template must always be explicitly created, and may never be automatically created. +** *`ignore_missing_component_templates` (Optional, string[])*: The configuration option ignore_missing_component_templates can be used when an index template +references a component template that might not exist +** *`deprecated` (Optional, boolean)*: Marks this index template as deprecated. When creating or updating a non-deprecated index template +that uses deprecated components, Elasticsearch will emit a deprecation warning. +** *`create` (Optional, boolean)*: If `true`, this request cannot replace or update existing index templates. +** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. +** *`cause` (Optional, string)*: User defined reason for creating/updating the index template + +[discrete] +==== put_mapping +Update field mappings. +Add new fields to an existing data stream or index. +You can also use this API to change the search settings of existing fields and add new properties to existing object fields. +For data streams, these changes are applied to all backing indices by default. + +**Add multi-fields to an existing field** + +Multi-fields let you index the same field in different ways. +You can use this API to update the fields mapping parameter and enable multi-fields for an existing field. +WARNING: If an index (or data stream) contains documents when you add a multi-field, those documents will not have values for the new multi-field. +You can populate the new multi-field with the update by query API. + +**Change supported mapping parameters for an existing field** + +The documentation for each mapping parameter indicates whether you can update it for an existing field using this API. +For example, you can use the update mapping API to update the `ignore_above` parameter. + +**Change the mapping of an existing field** + +Except for supported mapping parameters, you can't change the mapping or field type of an existing field. +Changing an existing field could invalidate data that's already indexed. + +If you need to change the mapping of a field in a data stream's backing indices, refer to documentation about modifying data streams. +If you need to change the mapping of a field in other indices, create a new index with the correct mapping and reindex your data into that index. + +**Rename a field** + +Renaming a field would invalidate data already indexed under the old field name. +Instead, add an alias field to create an alternate field name. 
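+
+For example, a minimal sketch that adds a new field to an existing index, assuming a configured `client` instance (the index and field names are illustrative):
+
+[source,ts]
+----
+// Add a new keyword field to an existing index without touching other mappings
+await client.indices.putMapping({
+  index: 'my-index',
+  properties: {
+    user_id: { type: 'keyword' }
+  }
+})
+----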
+ +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-put-mapping[Endpoint documentation] +[source,ts] +---- +client.indices.putMapping({ index }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`index` (string | string[])*: A list of index names the mapping should be added to (supports wildcards); use `_all` or omit to add the mapping on all indices. +** *`date_detection` (Optional, boolean)*: Controls whether dynamic date detection is enabled. +** *`dynamic` (Optional, Enum("strict" | "runtime" | true | false))*: Controls whether new fields are added dynamically. +** *`dynamic_date_formats` (Optional, string[])*: If date detection is enabled then new string fields are checked +against 'dynamic_date_formats' and if the value matches then +a new date field is added instead of string. +** *`dynamic_templates` (Optional, Record[])*: Specify dynamic templates for the mapping. +** *`_field_names` (Optional, { enabled })*: Control whether field names are enabled for the index. +** *`_meta` (Optional, Record)*: A mapping type can have custom meta data associated with it. These are +not used at all by Elasticsearch, but can be used to store +application-specific metadata. +** *`numeric_detection` (Optional, boolean)*: Automatically map strings into numeric data types for all fields. +** *`properties` (Optional, Record)*: Mapping for a field. For new fields, this mapping can include: + +- Field name +- Field data type +- Mapping parameters +** *`_routing` (Optional, { required })*: Enable making a routing value required on indexed documents. +** *`_source` (Optional, { compress, compress_threshold, enabled, excludes, includes, mode })*: Control whether the _source field is enabled on the index. +** *`runtime` (Optional, Record)*: Mapping of runtime fields for the index. +** *`allow_no_indices` (Optional, boolean)*: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. +This behavior applies even if the request targets other open indices. +** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of index that wildcard patterns can match. +If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. +Supports a list of values, such as `open,hidden`. +Valid values are: `all`, `open`, `closed`, `hidden`, `none`. +** *`ignore_unavailable` (Optional, boolean)*: If `false`, the request returns an error if it targets a missing or closed index. +** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. +** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. +If no response is received before the timeout expires, the request fails and returns an error. +** *`write_index_only` (Optional, boolean)*: If `true`, the mappings are applied only to the current write index for the target. + +[discrete] +==== put_settings +Update index settings. +Changes dynamic index settings in real time. +For data streams, index setting changes are applied to all backing indices by default. + +To revert a setting to the default value, use a null value. +The list of per-index settings that can be updated dynamically on live indices can be found in index module documentation. 
+To preserve existing settings from being updated, set the `preserve_existing` parameter to `true`.
+
+NOTE: You can only define new analyzers on closed indices.
+To add an analyzer, you must close the index, define the analyzer, and reopen the index.
+You cannot close the write index of a data stream.
+To update the analyzer for a data stream's write index and future backing indices, update the analyzer in the index template used by the stream.
+Then roll over the data stream to apply the new analyzer to the stream's write index and future backing indices.
+This affects searches and any new data added to the stream after the rollover.
+However, it does not affect the data stream's backing indices or their existing data.
+To change the analyzer for existing backing indices, you must create a new data stream and reindex your data into it.
+
+https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-put-settings[Endpoint documentation]
+[source,ts]
+----
+client.indices.putSettings({ ... })
+----
+
+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`index` (Optional, string | string[])*: List of data streams, indices, and aliases used to limit
+the request. Supports wildcards (`*`). To target all data streams and
+indices, omit this parameter or use `*` or `_all`.
+** *`settings` (Optional, { index, mode, routing_path, soft_deletes, sort, number_of_shards, number_of_replicas, number_of_routing_shards, check_on_startup, codec, routing_partition_size, load_fixed_bitset_filters_eagerly, hidden, auto_expand_replicas, merge, search, refresh_interval, max_result_window, max_inner_result_window, max_rescore_window, max_docvalue_fields_search, max_script_fields, max_ngram_diff, max_shingle_diff, blocks, max_refresh_listeners, analyze, highlight, max_terms_count, max_regex_length, routing, gc_deletes, default_pipeline, final_pipeline, lifecycle, provided_name, creation_date, creation_date_string, uuid, version, verified_before_close, format, max_slices_per_scroll, translog, query_string, priority, top_metrics_max_size, analysis, settings, time_series, queries, similarity, mapping, indexing.slowlog, indexing_pressure, store })*
+** *`allow_no_indices` (Optional, boolean)*: If `false`, the request returns an error if any wildcard expression, index
+alias, or `_all` value targets only missing or closed indices. This
+behavior applies even if the request targets other open indices. For
+example, a request targeting `foo*,bar*` returns an error if an index
+starts with `foo` but no index starts with `bar`.
+** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of index that wildcard patterns can match. If the request can target
+data streams, this argument determines whether wildcard expressions match
+hidden data streams. Supports a list of values, such as
+`open,hidden`.
+** *`flat_settings` (Optional, boolean)*: If `true`, returns settings in flat format.
+** *`ignore_unavailable` (Optional, boolean)*: If `false`, the request returns an error if it targets a missing or closed index.
+** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. If no response is
+received before the timeout expires, the request fails and returns an
+error.
+** *`preserve_existing` (Optional, boolean)*: If `true`, existing index settings remain unchanged.
+** *`reopen` (Optional, boolean)*: Whether to close and reopen the index to apply non-dynamic settings.
+If set to `true` the indices to which the settings are being applied +will be closed temporarily and then reopened in order to apply the changes. +** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. If no response is received before the + timeout expires, the request fails and returns an error. + +[discrete] +==== put_template +Create or update an index template. +Index templates define settings, mappings, and aliases that can be applied automatically to new indices. +Elasticsearch applies templates to new indices based on an index pattern that matches the index name. + +IMPORTANT: This documentation is about legacy index templates, which are deprecated and will be replaced by the composable templates introduced in Elasticsearch 7.8. + +Composable templates always take precedence over legacy templates. +If no composable template matches a new index, matching legacy templates are applied according to their order. + +Index templates are only applied during index creation. +Changes to index templates do not affect existing indices. +Settings and mappings specified in create index API requests override any settings or mappings specified in an index template. + +You can use C-style `/* *\/` block comments in index templates. +You can include comments anywhere in the request body, except before the opening curly bracket. + +**Indices matching multiple templates** + +Multiple index templates can potentially match an index, in this case, both the settings and mappings are merged into the final configuration of the index. +The order of the merging can be controlled using the order parameter, with lower order being applied first, and higher orders overriding them. +NOTE: Multiple matching templates with the same order value will result in a non-deterministic merging order. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-put-template[Endpoint documentation] +[source,ts] +---- +client.indices.putTemplate({ name }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`name` (string)*: The name of the template +** *`aliases` (Optional, Record)*: Aliases for the index. +** *`index_patterns` (Optional, string | string[])*: Array of wildcard expressions used to match the names +of indices during creation. +** *`mappings` (Optional, { all_field, date_detection, dynamic, dynamic_date_formats, dynamic_templates, _field_names, index_field, _meta, numeric_detection, properties, _routing, _size, _source, runtime, enabled, subobjects, _data_stream_timestamp })*: Mapping for fields in the index. +** *`order` (Optional, number)*: Order in which Elasticsearch applies this template if index +matches multiple templates. + +Templates with lower 'order' values are merged first. Templates with higher +'order' values are merged later, overriding templates with lower values. 
+** *`settings` (Optional, { index, mode, routing_path, soft_deletes, sort, number_of_shards, number_of_replicas, number_of_routing_shards, check_on_startup, codec, routing_partition_size, load_fixed_bitset_filters_eagerly, hidden, auto_expand_replicas, merge, search, refresh_interval, max_result_window, max_inner_result_window, max_rescore_window, max_docvalue_fields_search, max_script_fields, max_ngram_diff, max_shingle_diff, blocks, max_refresh_listeners, analyze, highlight, max_terms_count, max_regex_length, routing, gc_deletes, default_pipeline, final_pipeline, lifecycle, provided_name, creation_date, creation_date_string, uuid, version, verified_before_close, format, max_slices_per_scroll, translog, query_string, priority, top_metrics_max_size, analysis, settings, time_series, queries, similarity, mapping, indexing.slowlog, indexing_pressure, store })*: Configuration options for the index. +** *`version` (Optional, number)*: Version number used to manage index templates externally. This number +is not automatically generated by Elasticsearch. +To unset a version, replace the template without specifying one. +** *`create` (Optional, boolean)*: If true, this request cannot replace or update existing index templates. +** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. If no response is +received before the timeout expires, the request fails and returns an error. +** *`cause` (Optional, string)* + +[discrete] +==== recovery +Get index recovery information. +Get information about ongoing and completed shard recoveries for one or more indices. +For data streams, the API returns information for the stream's backing indices. + +All recoveries, whether ongoing or complete, are kept in the cluster state and may be reported on at any time. + +Shard recovery is the process of initializing a shard copy, such as restoring a primary shard from a snapshot or creating a replica shard from a primary shard. +When a shard recovery completes, the recovered shard is available for search and indexing. + +Recovery automatically occurs during the following processes: + +* When creating an index for the first time. +* When a node rejoins the cluster and starts up any missing primary shard copies using the data that it holds in its data path. +* Creation of new replica shard copies from the primary. +* Relocation of a shard copy to a different node in the same cluster. +* A snapshot restore operation. +* A clone, shrink, or split operation. + +You can determine the cause of a shard recovery using the recovery or cat recovery APIs. + +The index recovery API reports information about completed recoveries only for shard copies that currently exist in the cluster. +It only reports the last recovery for each shard copy and does not report historical information about earlier recoveries, nor does it report information about the recoveries of shard copies that no longer exist. +This means that if a shard copy completes a recovery and then Elasticsearch relocates it onto a different node then the information about the original recovery will not be shown in the recovery API. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-recovery[Endpoint documentation] +[source,ts] +---- +client.indices.recovery({ ... }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`index` (Optional, string | string[])*: List of data streams, indices, and aliases used to limit the request. +Supports wildcards (`*`). 
+To target all data streams and indices, omit this parameter or use `*` or `_all`. +** *`active_only` (Optional, boolean)*: If `true`, the response only includes ongoing shard recoveries. +** *`detailed` (Optional, boolean)*: If `true`, the response includes detailed information about shard recoveries. + +[discrete] +==== refresh +Refresh an index. +A refresh makes recent operations performed on one or more indices available for search. +For data streams, the API runs the refresh operation on the stream’s backing indices. + +By default, Elasticsearch periodically refreshes indices every second, but only on indices that have received one search request or more in the last 30 seconds. +You can change this default interval with the `index.refresh_interval` setting. + +Refresh requests are synchronous and do not return a response until the refresh operation completes. + +Refreshes are resource-intensive. +To ensure good cluster performance, it's recommended to wait for Elasticsearch's periodic refresh rather than performing an explicit refresh when possible. + +If your application workflow indexes documents and then runs a search to retrieve the indexed document, it's recommended to use the index API's `refresh=wait_for` query parameter option. +This option ensures the indexing operation waits for a periodic refresh before running the search. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-refresh[Endpoint documentation] +[source,ts] +---- +client.indices.refresh({ ... }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`index` (Optional, string | string[])*: List of data streams, indices, and aliases used to limit the request. +Supports wildcards (`*`). +To target all data streams and indices, omit this parameter or use `*` or `_all`. +** *`allow_no_indices` (Optional, boolean)*: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. +This behavior applies even if the request targets other open indices. +** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of index that wildcard patterns can match. +If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. +Supports a list of values, such as `open,hidden`. +Valid values are: `all`, `open`, `closed`, `hidden`, `none`. +** *`ignore_unavailable` (Optional, boolean)*: If `false`, the request returns an error if it targets a missing or closed index. + +[discrete] +==== reload_search_analyzers +Reload search analyzers. +Reload an index's search analyzers and their resources. +For data streams, the API reloads search analyzers and resources for the stream's backing indices. + +IMPORTANT: After reloading the search analyzers you should clear the request cache to make sure it doesn't contain responses derived from the previous versions of the analyzer. + +You can use the reload search analyzers API to pick up changes to synonym files used in the `synonym_graph` or `synonym` token filter of a search analyzer. +To be eligible, the token filter must have an `updateable` flag of `true` and only be used in search analyzers. + +NOTE: This API does not perform a reload for each shard of an index. +Instead, it performs a reload for each node containing index shards. +As a result, the total shard count returned by the API can differ from the number of index shards. 
+Because reloading affects every node with an index shard, it is important to update the synonym file on every data node in the cluster--including nodes that don't contain a shard replica--before using this API. +This ensures the synonym file is updated everywhere in the cluster in case shards are relocated in the future. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-reload-search-analyzers[Endpoint documentation] +[source,ts] +---- +client.indices.reloadSearchAnalyzers({ index }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`index` (string | string[])*: A list of index names to reload analyzers for +** *`allow_no_indices` (Optional, boolean)*: Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified) +** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Whether to expand wildcard expression to concrete indices that are open, closed or both. +** *`ignore_unavailable` (Optional, boolean)*: Whether specified concrete indices should be ignored when unavailable (missing or closed) +** *`resource` (Optional, string)*: Changed resource to reload analyzers from if applicable + +[discrete] +==== resolve_cluster +Resolve the cluster. + +Resolve the specified index expressions to return information about each cluster, including the local "querying" cluster, if included. +If no index expression is provided, the API will return information about all the remote clusters that are configured on the querying cluster. + +This endpoint is useful before doing a cross-cluster search in order to determine which remote clusters should be included in a search. + +You use the same index expression with this endpoint as you would for cross-cluster search. +Index and cluster exclusions are also supported with this endpoint. + +For each cluster in the index expression, information is returned about: + +* Whether the querying ("local") cluster is currently connected to each remote cluster specified in the index expression. Note that this endpoint actively attempts to contact the remote clusters, unlike the `remote/info` endpoint. +* Whether each remote cluster is configured with `skip_unavailable` as `true` or `false`. +* Whether there are any indices, aliases, or data streams on that cluster that match the index expression. +* Whether the search is likely to have errors returned when you do the cross-cluster search (including any authorization errors if you do not have permission to query the index). +* Cluster version information, including the Elasticsearch server version. + +For example, `GET /_resolve/cluster/my-index-*,cluster*:my-index-*` returns information about the local cluster and all remotely configured clusters that start with the alias `cluster*`. +Each cluster returns information about whether it has any indices, aliases or data streams that match `my-index-*`. + +## Note on backwards compatibility +The ability to query without an index expression was added in version 8.18, so when +querying remote clusters older than that, the local cluster will send the index +expression `dummy*` to those remote clusters. Thus, if an error occurs, you may see a reference +to that index expression even though you didn't request it. If it causes a problem, you can +instead include an index expression like `*:*` to bypass the issue.
+ +## Advantages of using this endpoint before a cross-cluster search + +You may want to exclude a cluster or index from a search when: + +* A remote cluster is not currently connected and is configured with `skip_unavailable=false`. Running a cross-cluster search under those conditions will cause the entire search to fail. +* A cluster has no matching indices, aliases or data streams for the index expression (or your user does not have permissions to search them). For example, suppose your index expression is `logs*,remote1:logs*` and the remote1 cluster has no indices, aliases or data streams that match `logs*`. In that case, that cluster will return no results if you include it in a cross-cluster search. +* The index expression (combined with any query parameters you specify) will likely cause an exception to be thrown when you do the search. In these cases, the "error" field in the `_resolve/cluster` response will be present. (This is also where security/permission errors will be shown.) +* A remote cluster is an older version that does not support the feature you want to use in your search. + +## Test availability of remote clusters + +The `remote/info` endpoint is commonly used to test whether the "local" cluster (the cluster being queried) is connected to its remote clusters, but it does not necessarily reflect whether the remote cluster is available or not. +The remote cluster may be available, while the local cluster is not currently connected to it. + +You can use the `_resolve/cluster` API to attempt to reconnect to remote clusters. +For example with `GET _resolve/cluster` or `GET _resolve/cluster/*:*`. +The `connected` field in the response will indicate whether it was successful. +If a connection was (re-)established, this will also cause the `remote/info` endpoint to now indicate a connected status. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-resolve-cluster[Endpoint documentation] +[source,ts] +---- +client.indices.resolveCluster({ ... }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`name` (Optional, string | string[])*: A list of names or index patterns for the indices, aliases, and data streams to resolve. +Resources on remote clusters can be specified using the ``:`` syntax. +Index and cluster exclusions (e.g., `-cluster1:*`) are also supported. +If no index expression is specified, information about all remote clusters configured on the local cluster +is returned without doing any index matching. +** *`allow_no_indices` (Optional, boolean)*: If false, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing +or closed indices. This behavior applies even if the request targets other open indices. For example, a request +targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. +NOTE: This option is only supported when specifying an index expression. You will get an error if you specify index +options to the `_resolve/cluster` API endpoint that takes no index expression. +** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of index that wildcard patterns can match. +If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. +Supports a list of values, such as `open,hidden`. +Valid values are: `all`, `open`, `closed`, `hidden`, `none`.
+NOTE: This option is only supported when specifying an index expression. You will get an error if you specify index +options to the `_resolve/cluster` API endpoint that takes no index expression. +** *`ignore_throttled` (Optional, boolean)*: If true, concrete, expanded, or aliased indices are ignored when frozen. +NOTE: This option is only supported when specifying an index expression. You will get an error if you specify index +options to the `_resolve/cluster` API endpoint that takes no index expression. +** *`ignore_unavailable` (Optional, boolean)*: If false, the request returns an error if it targets a missing or closed index. +NOTE: This option is only supported when specifying an index expression. You will get an error if you specify index +options to the `_resolve/cluster` API endpoint that takes no index expression. +** *`timeout` (Optional, string | -1 | 0)*: The maximum time to wait for remote clusters to respond. +If a remote cluster does not respond within this timeout period, the API response +will show the cluster as not connected and include an error message that the +request timed out. + +The default timeout is unset and the query can take +as long as the networking layer is configured to wait for remote clusters that are +not responding (typically 30 seconds). + +[discrete] +==== resolve_index +Resolve indices. +Resolve the names and/or index patterns for indices, aliases, and data streams. +Multiple patterns and remote clusters are supported. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-resolve-index[Endpoint documentation] +[source,ts] +---- +client.indices.resolveIndex({ name }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`name` (string | string[])*: Comma-separated name(s) or index pattern(s) of the indices, aliases, and data streams to resolve. +Resources on remote clusters can be specified using the ``:`` syntax. +** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of index that wildcard patterns can match. +If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. +Supports a list of values, such as `open,hidden`. +Valid values are: `all`, `open`, `closed`, `hidden`, `none`. +** *`ignore_unavailable` (Optional, boolean)*: If `false`, the request returns an error if it targets a missing or closed index. +** *`allow_no_indices` (Optional, boolean)*: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. +This behavior applies even if the request targets other open indices. +For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. + +[discrete] +==== rollover +Roll over to a new index. +TIP: It is recommended to use the index lifecycle rollover action to automate rollovers. + +The rollover API creates a new index for a data stream or index alias. +The API behavior depends on the rollover target. + +**Roll over a data stream** + +If you roll over a data stream, the API creates a new write index for the stream. +The stream's previous write index becomes a regular backing index. +A rollover also increments the data stream's generation. + +**Roll over an index alias with a write index** + +TIP: Prior to Elasticsearch 7.9, you'd typically use an index alias with a write index to manage time series data. 
+Data streams replace this functionality, require less maintenance, and automatically integrate with data tiers. + +If an index alias points to multiple indices, one of the indices must be a write index. +The rollover API creates a new write index for the alias with `is_write_index` set to `true`. +The API also sets `is_write_index` to `false` for the previous write index. + +**Roll over an index alias with one index** + +If you roll over an index alias that points to only one index, the API creates a new index for the alias and removes the original index from the alias. + +NOTE: A rollover creates a new index and is subject to the `wait_for_active_shards` setting. + +**Increment index names for an alias** + +When you roll over an index alias, you can specify a name for the new index. +If you don't specify a name and the current index ends with `-` and a number, such as `my-index-000001` or `my-index-3`, the new index name increments that number. +For example, if you roll over an alias with a current index of `my-index-000001`, the rollover creates a new index named `my-index-000002`. +This number is always six characters and zero-padded, regardless of the previous index's name. + +If you use an index alias for time series data, you can use date math in the index name to track the rollover date. +For example, you can create an alias that points to an index named ``. +If you create the index on May 6, 2099, the index's name is `my-index-2099.05.06-000001`. +If you roll over the alias on May 7, 2099, the new index's name is `my-index-2099.05.07-000002`. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-rollover[Endpoint documentation] +[source,ts] +---- +client.indices.rollover({ alias }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`alias` (string)*: Name of the data stream or index alias to roll over. +** *`new_index` (Optional, string)*: Name of the index to create. +Supports date math. +Data streams do not support this parameter. +** *`aliases` (Optional, Record)*: Aliases for the target index. +Data streams do not support this parameter. +** *`conditions` (Optional, { min_age, max_age, max_age_millis, min_docs, max_docs, max_size, max_size_bytes, min_size, min_size_bytes, max_primary_shard_size, max_primary_shard_size_bytes, min_primary_shard_size, min_primary_shard_size_bytes, max_primary_shard_docs, min_primary_shard_docs })*: Conditions for the rollover. +If specified, Elasticsearch only performs the rollover if the current index satisfies these conditions. +If this parameter is not specified, Elasticsearch performs the rollover unconditionally. +If conditions are specified, at least one of them must be a `max_*` condition. +The index will rollover if any `max_*` condition is satisfied and all `min_*` conditions are satisfied. +** *`mappings` (Optional, { all_field, date_detection, dynamic, dynamic_date_formats, dynamic_templates, _field_names, index_field, _meta, numeric_detection, properties, _routing, _size, _source, runtime, enabled, subobjects, _data_stream_timestamp })*: Mapping for fields in the index. +If specified, this mapping can include field names, field data types, and mapping parameters. +** *`settings` (Optional, Record)*: Configuration options for the index. +Data streams do not support this parameter. +** *`dry_run` (Optional, boolean)*: If `true`, checks whether the current index satisfies the specified conditions but does not perform a rollover.
+** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. +** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. +If no response is received before the timeout expires, the request fails and returns an error. +** *`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))*: The number of shard copies that must be active before proceeding with the operation. +Set to all or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). +** *`lazy` (Optional, boolean)*: If set to true, the rollover action will only mark a data stream to signal that it needs to be rolled over at the next write. +Only allowed on data streams. + +[discrete] +==== segments +Get index segments. +Get low-level information about the Lucene segments in index shards. +For data streams, the API returns information about the stream's backing indices. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-segments[Endpoint documentation] +[source,ts] +---- +client.indices.segments({ ... }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`index` (Optional, string | string[])*: List of data streams, indices, and aliases used to limit the request. +Supports wildcards (`*`). +To target all data streams and indices, omit this parameter or use `*` or `_all`. +** *`allow_no_indices` (Optional, boolean)*: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. +This behavior applies even if the request targets other open indices. +** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of index that wildcard patterns can match. +If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. +Supports a list of values, such as `open,hidden`. +Valid values are: `all`, `open`, `closed`, `hidden`, `none`. +** *`ignore_unavailable` (Optional, boolean)*: If `false`, the request returns an error if it targets a missing or closed index. + +[discrete] +==== shard_stores +Get index shard stores. +Get store information about replica shards in one or more indices. +For data streams, the API retrieves store information for the stream's backing indices. + +The index shard stores API returns the following information: + +* The node on which each replica shard exists. +* The allocation ID for each replica shard. +* A unique ID for each replica shard. +* Any errors encountered while opening the shard index or from an earlier failure. + +By default, the API returns store information only for primary shards that are unassigned or have one or more unassigned replica shards. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-shard-stores[Endpoint documentation] +[source,ts] +---- +client.indices.shardStores({ ... }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`index` (Optional, string | string[])*: List of data streams, indices, and aliases used to limit the request. +** *`allow_no_indices` (Optional, boolean)*: If false, the request returns an error if any wildcard expression, index alias, or _all +value targets only missing or closed indices. This behavior applies even if the request +targets other open indices. 
+** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of index that wildcard patterns can match. If the request can target data streams, +this argument determines whether wildcard expressions match hidden data streams. +** *`ignore_unavailable` (Optional, boolean)*: If true, missing or closed indices are not included in the response. +** *`status` (Optional, Enum("green" | "yellow" | "red" | "all") | Enum("green" | "yellow" | "red" | "all")[])*: List of shard health statuses used to limit the request. + +[discrete] +==== shrink +Shrink an index. +Shrink an index into a new index with fewer primary shards. + +Before you can shrink an index: + +* The index must be read-only. +* A copy of every shard in the index must reside on the same node. +* The index must have a green health status. + +To make shard allocation easier, we recommend you also remove the index's replica shards. +You can later re-add replica shards as part of the shrink operation. + +The requested number of primary shards in the target index must be a factor of the number of shards in the source index. +For example, an index with 8 primary shards can be shrunk into 4, 2, or 1 primary shards, or an index with 15 primary shards can be shrunk into 5, 3, or 1. +If the number of shards in the index is a prime number, it can only be shrunk into a single primary shard. + Before shrinking, a (primary or replica) copy of every shard in the index must be present on the same node. + +The current write index on a data stream cannot be shrunk. In order to shrink the current write index, the data stream must first be rolled over so that a new write index is created and then the previous write index can be shrunk. + +A shrink operation: + +* Creates a new target index with the same definition as the source index, but with a smaller number of primary shards. +* Hard-links segments from the source index into the target index. If the file system does not support hard-linking, then all segments are copied into the new index, which is a much more time consuming process. Also if using multiple data paths, shards on different data paths require a full copy of segment files if they are not on the same disk since hardlinks do not work across disks. +* Recovers the target index as though it were a closed index which had just been re-opened. Recovers shards to the `.routing.allocation.initial_recovery._id` index setting. + +IMPORTANT: Indices can only be shrunk if they satisfy the following requirements: + +* The target index must not exist. +* The source index must have more primary shards than the target index. +* The number of primary shards in the target index must be a factor of the number of primary shards in the source index. The source index must have more primary shards than the target index. +* The index must not contain more than 2,147,483,519 documents in total across all shards that will be shrunk into a single shard on the target index as this is the maximum number of docs that can fit into a single shard. +* The node handling the shrink process must have sufficient free disk space to accommodate a second copy of the existing index. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-shrink[Endpoint documentation] +[source,ts] +---- +client.indices.shrink({ index, target }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`index` (string)*: Name of the source index to shrink.
+** *`target` (string)*: Name of the target index to create. +** *`aliases` (Optional, Record)*: The key is the alias name. +Index alias names support date math. +** *`settings` (Optional, Record)*: Configuration options for the target index. +** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. +** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. +If no response is received before the timeout expires, the request fails and returns an error. +** *`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))*: The number of shard copies that must be active before proceeding with the operation. +Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). + +[discrete] +==== simulate_index_template +Simulate an index. +Get the index configuration that would be applied to the specified index from an existing index template. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-simulate-index-template[Endpoint documentation] +[source,ts] +---- +client.indices.simulateIndexTemplate({ name }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`name` (string)*: Name of the index to simulate +** *`create` (Optional, boolean)*: Whether the index template we optionally defined in the body should only be dry-run added if new or can also replace an existing one +** *`cause` (Optional, string)*: User defined reason for dry-run creating the new template for simulation purposes +** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. +** *`include_defaults` (Optional, boolean)*: If true, returns all relevant default configurations for the index template. + +[discrete] +==== simulate_template +Simulate an index template. +Get the index configuration that would be applied by a particular index template. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-simulate-template[Endpoint documentation] +[source,ts] +---- +client.indices.simulateTemplate({ ... }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`name` (Optional, string)*: Name of the index template to simulate. To test a template configuration before you add it to the cluster, omit +this parameter and specify the template configuration in the request body. +** *`allow_auto_create` (Optional, boolean)*: This setting overrides the value of the `action.auto_create_index` cluster setting. +If set to `true` in a template, then indices can be automatically created using that template even if auto-creation of indices is disabled via `action.auto_create_index`. +If set to `false`, then indices or data streams matching the template must always be explicitly created, and may never be automatically created. +** *`index_patterns` (Optional, string | string[])*: Array of wildcard (`*`) expressions used to match the names of data streams and indices during creation. +** *`composed_of` (Optional, string[])*: An ordered list of component template names. +Component templates are merged in the order specified, meaning that the last component template specified has the highest precedence. +** *`template` (Optional, { aliases, mappings, settings, lifecycle })*: Template to be applied.
+It may optionally include an `aliases`, `mappings`, or `settings` configuration. +** *`data_stream` (Optional, { hidden, allow_custom_routing })*: If this object is included, the template is used to create data streams and their backing indices. +Supports an empty object. +Data streams require a matching index template with a `data_stream` object. +** *`priority` (Optional, number)*: Priority to determine index template precedence when a new data stream or index is created. +The index template with the highest priority is chosen. +If no priority is specified, the template is treated as though it is of priority 0 (lowest priority). +This number is not automatically generated by Elasticsearch. +** *`version` (Optional, number)*: Version number used to manage index templates externally. +This number is not automatically generated by Elasticsearch. +** *`_meta` (Optional, Record)*: Optional user metadata about the index template. +May have any contents. +This map is not automatically generated by Elasticsearch. +** *`ignore_missing_component_templates` (Optional, string[])*: The configuration option ignore_missing_component_templates can be used when an index template +references a component template that might not exist +** *`deprecated` (Optional, boolean)*: Marks this index template as deprecated. When creating or updating a non-deprecated index template +that uses deprecated components, Elasticsearch will emit a deprecation warning. +** *`create` (Optional, boolean)*: If true, the template passed in the body is only used if no existing templates match the same index patterns. If false, the simulation uses the template with the highest priority. Note that the template is not permanently added or updated in either case; it is only used for the simulation. +** *`cause` (Optional, string)*: User defined reason for dry-run creating the new template for simulation purposes +** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. +** *`include_defaults` (Optional, boolean)*: If true, returns all relevant default configurations for the index template. + +[discrete] +==== split +Split an index. +Split an index into a new index with more primary shards. +Before you can split an index: + +* The index must be read-only. +* The cluster health status must be green. + +You can make an index read-only with the following request using the add index block API: + +---- +PUT /my_source_index/_block/write +---- + +The current write index on a data stream cannot be split. +In order to split the current write index, the data stream must first be rolled over so that a new write index is created and then the previous write index can be split. + +The number of times the index can be split (and the number of shards that each original shard can be split into) is determined by the `index.number_of_routing_shards` setting. +The number of routing shards specifies the hashing space that is used internally to distribute documents across shards with consistent hashing. +For instance, a 5 shard index with `number_of_routing_shards` set to 30 (5 x 2 x 3) could be split by a factor of 2 or 3. + +A split operation: + +* Creates a new target index with the same definition as the source index, but with a larger number of primary shards. +* Hard-links segments from the source index into the target index.
If the file system doesn't support hard-linking, all segments are copied into the new index, which is a much more time consuming process. +* Hashes all documents again, after low level files are created, to delete documents that belong to a different shard. +* Recovers the target index as though it were a closed index which had just been re-opened. + +IMPORTANT: Indices can only be split if they satisfy the following requirements: + +* The target index must not exist. +* The source index must have fewer primary shards than the target index. +* The number of primary shards in the target index must be a multiple of the number of primary shards in the source index. +* The node handling the split process must have sufficient free disk space to accommodate a second copy of the existing index. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-split[Endpoint documentation] +[source,ts] +---- +client.indices.split({ index, target }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`index` (string)*: Name of the source index to split. +** *`target` (string)*: Name of the target index to create. +** *`aliases` (Optional, Record)*: Aliases for the resulting index. +** *`settings` (Optional, Record)*: Configuration options for the target index. +** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. +** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. +If no response is received before the timeout expires, the request fails and returns an error. +** *`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))*: The number of shard copies that must be active before proceeding with the operation. +Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). + +[discrete] +==== stats +Get index statistics. +For data streams, the API retrieves statistics for the stream's backing indices. + +By default, the returned statistics are index-level with `primaries` and `total` aggregations. +`primaries` are the values for only the primary shards. +`total` are the accumulated values for both primary and replica shards. + +To get shard-level statistics, set the `level` parameter to `shards`. + +NOTE: When moving to another node, the shard-level statistics for a shard are cleared. +Although the shard is no longer part of the node, that node retains any node-level statistics to which the shard contributed. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-stats[Endpoint documentation] +[source,ts] +---- +client.indices.stats({ ... }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`metric` (Optional, string | string[])*: Limit the information returned to the specific metrics. +** *`index` (Optional, string | string[])*: A list of index names; use `_all` or empty string to perform the operation on all indices +** *`completion_fields` (Optional, string | string[])*: List or wildcard expressions of fields to include in fielddata and suggest statistics. +** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of index that wildcard patterns can match. If the request can target data streams, this argument +determines whether wildcard expressions match hidden data streams.
Supports a list of values, +such as `open,hidden`. +** *`fielddata_fields` (Optional, string | string[])*: List or wildcard expressions of fields to include in fielddata statistics. +** *`fields` (Optional, string | string[])*: List or wildcard expressions of fields to include in the statistics. +** *`forbid_closed_indices` (Optional, boolean)*: If true, statistics are not collected from closed indices. +** *`groups` (Optional, string | string[])*: List of search groups to include in the search statistics. +** *`include_segment_file_sizes` (Optional, boolean)*: If true, the call reports the aggregated disk usage of each one of the Lucene index files (only applies if segment stats are requested). +** *`include_unloaded_segments` (Optional, boolean)*: If true, the response includes information from segments that are not loaded into memory. +** *`level` (Optional, Enum("cluster" | "indices" | "shards"))*: Indicates whether statistics are aggregated at the cluster, index, or shard level. + +[discrete] +==== update_aliases +Create or update an alias. +Adds a data stream or index to an alias. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-update-aliases[Endpoint documentation] +[source,ts] +---- +client.indices.updateAliases({ ... }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`actions` (Optional, { add_backing_index, remove_backing_index }[])*: Actions to perform. +** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. +** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. +If no response is received before the timeout expires, the request fails and returns an error. + +[discrete] +==== validate_query +Validate a query. +Validates a query without running it. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-validate-query[Endpoint documentation] +[source,ts] +---- +client.indices.validateQuery({ ... }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`index` (Optional, string | string[])*: List of data streams, indices, and aliases to search. +Supports wildcards (`*`). +To search all data streams or indices, omit this parameter or use `*` or `_all`. +** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Query in the Lucene query string syntax. +** *`allow_no_indices` (Optional, boolean)*: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. +This behavior applies even if the request targets other open indices. +** *`all_shards` (Optional, boolean)*: If `true`, the validation is executed on all shards instead of one random shard per index. 
+** *`analyzer` (Optional, string)*: Analyzer to use for the query string. +This parameter can only be used when the `q` query string parameter is specified. +** *`analyze_wildcard` (Optional, boolean)*: If `true`, wildcard and prefix queries are analyzed. +** *`default_operator` (Optional, Enum("and" | "or"))*: The default operator for query string query: `AND` or `OR`. +** *`df` (Optional, string)*: Field to use as default where no field prefix is given in the query string. +This parameter can only be used when the `q` query string parameter is specified. +** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of index that wildcard patterns can match. +If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. +Supports a list of values, such as `open,hidden`. +Valid values are: `all`, `open`, `closed`, `hidden`, `none`. +** *`explain` (Optional, boolean)*: If `true`, the response returns detailed information if an error has occurred. +** *`ignore_unavailable` (Optional, boolean)*: If `false`, the request returns an error if it targets a missing or closed index. +** *`lenient` (Optional, boolean)*: If `true`, format-based query failures (such as providing text to a numeric field) in the query string will be ignored. +** *`rewrite` (Optional, boolean)*: If `true`, returns a more detailed explanation showing the actual Lucene query that will be executed. +** *`q` (Optional, string)*: Query in the Lucene query string syntax. + +[discrete] +=== inference +[discrete] +==== chat_completion_unified +Perform chat completion inference + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-unified-inference[Endpoint documentation] +[source,ts] +---- +client.inference.chatCompletionUnified({ inference_id, messages }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`inference_id` (string)*: The inference Id +** *`messages` ({ content, role, tool_call_id, tool_calls }[])*: A list of objects representing the conversation. +** *`model` (Optional, string)*: The ID of the model to use. +** *`max_completion_tokens` (Optional, number)*: The upper bound limit for the number of tokens that can be generated for a completion request. +** *`stop` (Optional, string[])*: A sequence of strings to control when the model should stop generating additional tokens. +** *`temperature` (Optional, float)*: The sampling temperature to use. +** *`tool_choice` (Optional, string | { type, function })*: Controls which tool is called by the model. +** *`tools` (Optional, { type, function }[])*: A list of tools that the model can call. +** *`top_p` (Optional, float)*: Nucleus sampling, an alternative to sampling with temperature. +** *`timeout` (Optional, string | -1 | 0)*: Specifies the amount of time to wait for the inference request to complete. + +[discrete] +==== completion +Perform completion inference on the service + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-inference[Endpoint documentation] +[source,ts] +---- +client.inference.completion({ inference_id, input }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`inference_id` (string)*: The inference Id +** *`input` (string | string[])*: Inference input. +Either a string or an array of strings. 
+** *`task_settings` (Optional, User-defined value)*: Optional task settings +** *`timeout` (Optional, string | -1 | 0)*: Specifies the amount of time to wait for the inference request to complete. + +[discrete] +==== delete +Delete an inference endpoint + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-delete[Endpoint documentation] +[source,ts] +---- +client.inference.delete({ inference_id }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`inference_id` (string)*: The inference identifier. +** *`task_type` (Optional, Enum("sparse_embedding" | "text_embedding" | "rerank" | "completion" | "chat_completion"))*: The task type +** *`dry_run` (Optional, boolean)*: When true, the endpoint is not deleted and a list of ingest processors which reference this endpoint is returned. +** *`force` (Optional, boolean)*: When true, the inference endpoint is forcefully deleted even if it is still being used by ingest processors or semantic text fields. + +[discrete] +==== get +Get an inference endpoint + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-get[Endpoint documentation] +[source,ts] +---- +client.inference.get({ ... }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`task_type` (Optional, Enum("sparse_embedding" | "text_embedding" | "rerank" | "completion" | "chat_completion"))*: The task type +** *`inference_id` (Optional, string)*: The inference Id + +[discrete] +==== put +Create an inference endpoint. +When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. +After creating the endpoint, wait for the model deployment to complete before using it. +To verify the deployment status, use the get trained model statistics API. +Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. +Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources. + +IMPORTANT: The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Mistral, Azure OpenAI, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face. +For built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models. +However, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put[Endpoint documentation] +[source,ts] +---- +client.inference.put({ inference_id }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`inference_id` (string)*: The inference Id +** *`task_type` (Optional, Enum("sparse_embedding" | "text_embedding" | "rerank" | "completion" | "chat_completion"))*: The task type +** *`inference_config` (Optional, { chunking_settings, service, service_settings, task_settings })* + +[discrete] +==== put_openai +Create an OpenAI inference endpoint. + +Create an inference endpoint to perform an inference task with the `openai` service. + +When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. +After creating the endpoint, wait for the model deployment to complete before using it. 
+To verify the deployment status, use the get trained model statistics API. +Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. +Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources. + +{ref}/infer-service-openai.html[Endpoint documentation] +[source,ts] +---- +client.inference.putOpenai({ task_type, openai_inference_id, service, service_settings }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`task_type` (Enum("chat_completion" | "completion" | "text_embedding"))*: The type of the inference task that the model will perform. +NOTE: The `chat_completion` task type only supports streaming and only through the _stream API. +** *`openai_inference_id` (string)*: The unique identifier of the inference endpoint. +** *`service` (Enum("openai"))*: The type of service supported for the specified task type. In this case, `openai`. +** *`service_settings` ({ api_key, dimensions, model_id, organization_id, rate_limit, url })*: Settings used to install the inference model. These settings are specific to the `openai` service. +** *`chunking_settings` (Optional, { max_chunk_size, overlap, sentence_overlap, strategy })*: The chunking configuration object. +** *`task_settings` (Optional, { user })*: Settings to configure the inference task. +These settings are specific to the task type you specified. + +[discrete] +==== put_watsonx +Create a Watsonx inference endpoint. + +Create an inference endpoint to perform an inference task with the `watsonxai` service. +You need an IBM Cloud Databases for Elasticsearch deployment to use the `watsonxai` inference service. +You can provision one through the IBM catalog, the Cloud Databases CLI plug-in, the Cloud Databases API, or Terraform. + +When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. +After creating the endpoint, wait for the model deployment to complete before using it. +To verify the deployment status, use the get trained model statistics API. +Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. +Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-watsonx[Endpoint documentation] +[source,ts] +---- +client.inference.putWatsonx({ task_type, watsonx_inference_id, service, service_settings }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`task_type` (Enum("text_embedding"))*: The task type. +The only valid task type for the model to perform is `text_embedding`. +** *`watsonx_inference_id` (string)*: The unique identifier of the inference endpoint. +** *`service` (Enum("openai"))*: The type of service supported for the specified task type. In this case, `watsonxai`. +** *`service_settings` ({ api_key, api_version, model_id, project_id, rate_limit, url })*: Settings used to install the inference model. These settings are specific to the `watsonxai` service. 
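+
+For reference, a minimal creation request for this endpoint might look like the following sketch. The endpoint ID, project ID, URL, API key, API version, and model ID are placeholder values, and the `watsonxai` service value follows the description above.
+
+[source,ts]
+----
+// Sketch only: replace the placeholder IDs, URL, and API key with real values.
+const resp = await client.inference.putWatsonx({
+  task_type: 'text_embedding',
+  watsonx_inference_id: 'my-watsonx-embeddings', // hypothetical endpoint ID
+  service: 'watsonxai', // per the description above
+  service_settings: {
+    api_key: '<ibm-cloud-api-key>',
+    api_version: '2024-05-02', // example API version date
+    model_id: 'ibm/slate-30m-english-rtrvr', // example embedding model
+    project_id: '<watsonx-project-id>',
+    url: '<watsonx-deployment-url>'
+  }
+})
+console.log(resp)
+----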
+ +[discrete] +==== rerank +Perform reranking inference on the service + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-inference[Endpoint documentation] +[source,ts] +---- +client.inference.rerank({ inference_id, query, input }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`inference_id` (string)*: The unique identifier for the inference endpoint. +** *`query` (string)*: Query input. +** *`input` (string | string[])*: The text on which you want to perform the inference task. +It can be a single string or an array. + +> info +> Inference endpoints for the `completion` task type currently only support a single string as input. +** *`task_settings` (Optional, User-defined value)*: Task settings for the individual inference request. +These settings are specific to the task type you specified and override the task settings specified when initializing the service. +** *`timeout` (Optional, string | -1 | 0)*: The amount of time to wait for the inference request to complete. + +[discrete] +==== sparse_embedding +Perform sparse embedding inference on the service + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-inference[Endpoint documentation] +[source,ts] +---- +client.inference.sparseEmbedding({ inference_id, input }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`inference_id` (string)*: The inference Id +** *`input` (string | string[])*: Inference input. +Either a string or an array of strings. +** *`task_settings` (Optional, User-defined value)*: Optional task settings +** *`timeout` (Optional, string | -1 | 0)*: Specifies the amount of time to wait for the inference request to complete. + +[discrete] +==== stream_completion +Perform streaming inference. +Get real-time responses for completion tasks by delivering answers incrementally, reducing response times during computation. +This API works only with the completion task type. + +IMPORTANT: The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Azure, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face. For built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models. However, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs. + +This API requires the `monitor_inference` cluster privilege (the built-in `inference_admin` and `inference_user` roles grant this privilege). You must use a client that supports streaming. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-stream-inference[Endpoint documentation] +[source,ts] +---- +client.inference.streamCompletion({ inference_id, input }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`inference_id` (string)*: The unique identifier for the inference endpoint. +** *`input` (string | string[])*: The text on which you want to perform the inference task. +It can be a single string or an array. + +NOTE: Inference endpoints for the completion task type currently only support a single string as input.
+** *`task_settings` (Optional, User-defined value)*: Optional task settings + +[discrete] +==== text_embedding +Perform text embedding inference on the service + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-inference[Endpoint documentation] +[source,ts] +---- +client.inference.textEmbedding({ inference_id, input }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`inference_id` (string)*: The inference Id +** *`input` (string | string[])*: Inference input. +Either a string or an array of strings. +** *`task_settings` (Optional, User-defined value)*: Optional task settings +** *`timeout` (Optional, string | -1 | 0)*: Specifies the amount of time to wait for the inference request to complete. + +[discrete] +==== update +Update an inference endpoint. + +Modify `task_settings`, secrets (within `service_settings`), or `num_allocations` for an inference endpoint, depending on the specific endpoint service and `task_type`. + +IMPORTANT: The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Azure, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face. +For built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models. +However, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-update[Endpoint documentation] +[source,ts] +---- +client.inference.update({ inference_id }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`inference_id` (string)*: The unique identifier of the inference endpoint. +** *`task_type` (Optional, Enum("sparse_embedding" | "text_embedding" | "rerank" | "completion" | "chat_completion"))*: The type of inference task that the model performs. +** *`inference_config` (Optional, { chunking_settings, service, service_settings, task_settings })* + +[discrete] +=== ingest +[discrete] +==== delete_geoip_database +Delete GeoIP database configurations. + +Delete one or more IP geolocation database configurations. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ingest-delete-geoip-database[Endpoint documentation] +[source,ts] +---- +client.ingest.deleteGeoipDatabase({ id }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`id` (string | string[])*: A list of geoip database configurations to delete +** *`master_timeout` (Optional, string | -1 | 0)*: The period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. +** *`timeout` (Optional, string | -1 | 0)*: The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. + +[discrete] +==== delete_ip_location_database +Delete IP geolocation database configurations. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ingest-delete-ip-location-database[Endpoint documentation] +[source,ts] +---- +client.ingest.deleteIpLocationDatabase({ id }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`id` (string | string[])*: A list of IP location database configurations. +** *`master_timeout` (Optional, string | -1 | 0)*: The period to wait for a connection to the master node. 
+If no response is received before the timeout expires, the request fails and returns an error. +A value of `-1` indicates that the request should never time out. +** *`timeout` (Optional, string | -1 | 0)*: The period to wait for a response. +If no response is received before the timeout expires, the request fails and returns an error. +A value of `-1` indicates that the request should never time out. + +[discrete] +==== delete_pipeline +Delete pipelines. +Delete one or more ingest pipelines. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ingest-delete-pipeline[Endpoint documentation] +[source,ts] +---- +client.ingest.deletePipeline({ id }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`id` (string)*: Pipeline ID or wildcard expression of pipeline IDs used to limit the request. +To delete all ingest pipelines in a cluster, use a value of `*`. +** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. +** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. +If no response is received before the timeout expires, the request fails and returns an error. + +[discrete] +==== geo_ip_stats +Get GeoIP statistics. +Get download statistics for GeoIP2 databases that are used with the GeoIP processor. + +{ref}/geoip-processor.html[Endpoint documentation] +[source,ts] +---- +client.ingest.geoIpStats() +---- + + +[discrete] +==== get_geoip_database +Get GeoIP database configurations. + +Get information about one or more IP geolocation database configurations. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ingest-get-geoip-database[Endpoint documentation] +[source,ts] +---- +client.ingest.getGeoipDatabase({ ... }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`id` (Optional, string | string[])*: A list of database configuration IDs to retrieve. +Wildcard (`*`) expressions are supported. +To get all database configurations, omit this parameter or use `*`. + +[discrete] +==== get_ip_location_database +Get IP geolocation database configurations. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ingest-get-ip-location-database[Endpoint documentation] +[source,ts] +---- +client.ingest.getIpLocationDatabase({ ... }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`id` (Optional, string | string[])*: List of database configuration IDs to retrieve. +Wildcard (`*`) expressions are supported. +To get all database configurations, omit this parameter or use `*`. +** *`master_timeout` (Optional, string | -1 | 0)*: The period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. +A value of `-1` indicates that the request should never time out. + +[discrete] +==== get_pipeline +Get pipelines. + +Get information about one or more ingest pipelines. +This API returns a local reference of the pipeline. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ingest-get-pipeline[Endpoint documentation] +[source,ts] +---- +client.ingest.getPipeline({ ... }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`id` (Optional, string)*: List of pipeline IDs to retrieve. +Wildcard (`*`) expressions are supported. +To get all ingest pipelines, omit this parameter or use `*`. 
+** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. +** *`summary` (Optional, boolean)*: Return pipelines without their definitions (default: false) + +[discrete] +==== processor_grok +Run a grok processor. +Extract structured fields out of a single text field within a document. +You must choose which field to extract matched fields from, as well as the grok pattern you expect will match. +A grok pattern is like a regular expression that supports aliased expressions that can be reused. + +{ref}/grok-processor.html[Endpoint documentation] +[source,ts] +---- +client.ingest.processorGrok() +---- + + +[discrete] +==== put_geoip_database +Create or update a GeoIP database configuration. + +Refer to the create or update IP geolocation database configuration API. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ingest-put-geoip-database[Endpoint documentation] +[source,ts] +---- +client.ingest.putGeoipDatabase({ id, name, maxmind }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`id` (string)*: ID of the database configuration to create or update. +** *`name` (string)*: The provider-assigned name of the IP geolocation database to download. +** *`maxmind` ({ account_id })*: The configuration necessary to identify which IP geolocation provider to use to download the database, as well as any provider-specific configuration necessary for such downloading. +At present, the only supported provider is maxmind, and the maxmind provider requires that an account_id (string) is configured. +** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. +** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. + +[discrete] +==== put_ip_location_database +Create or update an IP geolocation database configuration. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ingest-put-ip-location-database[Endpoint documentation] +[source,ts] +---- +client.ingest.putIpLocationDatabase({ id }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`id` (string)*: The database configuration identifier. +** *`configuration` (Optional, { name, maxmind, ipinfo })* +** *`master_timeout` (Optional, string | -1 | 0)*: The period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. +A value of `-1` indicates that the request should never time out. +** *`timeout` (Optional, string | -1 | 0)*: The period to wait for a response from all relevant nodes in the cluster after updating the cluster metadata. +If no response is received before the timeout expires, the cluster metadata update still applies but the response indicates that it was not completely acknowledged. +A value of `-1` indicates that the request should never time out. + +[discrete] +==== put_pipeline +Create or update a pipeline. +Changes made using this API take effect immediately. + +{ref}/ingest.html[Endpoint documentation] +[source,ts] +---- +client.ingest.putPipeline({ id }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`id` (string)*: ID of the ingest pipeline to create or update. 
+** *`_meta` (Optional, Record)*: Optional metadata about the ingest pipeline. May have any contents. This map is not automatically generated by Elasticsearch. +** *`description` (Optional, string)*: Description of the ingest pipeline. +** *`on_failure` (Optional, { append, attachment, bytes, circle, community_id, convert, csv, date, date_index_name, dissect, dot_expander, drop, enrich, fail, fingerprint, foreach, ip_location, geo_grid, geoip, grok, gsub, html_strip, inference, join, json, kv, lowercase, network_direction, pipeline, redact, registered_domain, remove, rename, reroute, script, set, set_security_user, sort, split, terminate, trim, uppercase, urldecode, uri_parts, user_agent }[])*: Processors to run immediately after a processor failure. Each processor supports a processor-level `on_failure` value. If a processor without an `on_failure` value fails, Elasticsearch uses this pipeline-level parameter as a fallback. The processors in this parameter run sequentially in the order specified. Elasticsearch will not attempt to run the pipeline's remaining processors. +** *`processors` (Optional, { append, attachment, bytes, circle, community_id, convert, csv, date, date_index_name, dissect, dot_expander, drop, enrich, fail, fingerprint, foreach, ip_location, geo_grid, geoip, grok, gsub, html_strip, inference, join, json, kv, lowercase, network_direction, pipeline, redact, registered_domain, remove, rename, reroute, script, set, set_security_user, sort, split, terminate, trim, uppercase, urldecode, uri_parts, user_agent }[])*: Processors used to perform transformations on documents before indexing. Processors run sequentially in the order specified. +** *`version` (Optional, number)*: Version number used by external systems to track ingest pipelines. This parameter is intended for external systems only. Elasticsearch does not use or validate pipeline version numbers. +** *`deprecated` (Optional, boolean)*: Marks this ingest pipeline as deprecated. +When a deprecated ingest pipeline is referenced as the default or final pipeline when creating or updating a non-deprecated index template, Elasticsearch will emit a deprecation warning. +** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. +** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. +** *`if_version` (Optional, number)*: Required version for optimistic concurrency control for pipeline updates + +[discrete] +==== simulate +Simulate a pipeline. + +Run an ingest pipeline against a set of provided documents. +You can either specify an existing pipeline to use with the provided documents or supply a pipeline definition in the body of the request. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ingest-simulate[Endpoint documentation] +[source,ts] +---- +client.ingest.simulate({ docs }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`docs` ({ _id, _index, _source }[])*: Sample documents to test in the pipeline. +** *`id` (Optional, string)*: The pipeline to test. +If you don't specify a `pipeline` in the request body, this parameter is required. +** *`pipeline` (Optional, { description, on_failure, processors, version, deprecated, _meta })*: The pipeline to test. 
+If you don't specify the `pipeline` request path parameter, this parameter is required. +If you specify both this and the request path parameter, the API only uses the request path parameter. +** *`verbose` (Optional, boolean)*: If `true`, the response includes output data for each processor in the executed pipeline. + +[discrete] +=== license +[discrete] +==== delete +Delete the license. + +When the license expires, your subscription level reverts to Basic. + +If the operator privileges feature is enabled, only operator users can use this API. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-license-delete[Endpoint documentation] +[source,ts] +---- +client.license.delete({ ... }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`master_timeout` (Optional, string | -1 | 0)*: The period to wait for a connection to the master node. +** *`timeout` (Optional, string | -1 | 0)*: The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. + +[discrete] +==== get +Get license information. + +Get information about your Elastic license including its type, its status, when it was issued, and when it expires. + +>info +> If the master node is generating a new cluster state, the get license API may return a `404 Not Found` response. +> If you receive an unexpected 404 response after cluster startup, wait a short period and retry the request. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-license-get[Endpoint documentation] +[source,ts] +---- +client.license.get({ ... }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`accept_enterprise` (Optional, boolean)*: If `true`, this parameter returns enterprise for Enterprise license types. If `false`, this parameter returns platinum for both platinum and enterprise license types. This behavior is maintained for backwards compatibility. +This parameter is deprecated and will always be set to true in 8.x. +** *`local` (Optional, boolean)*: Specifies whether to retrieve local information. The default value is `false`, which means the information is retrieved from the master node. + +[discrete] +==== get_basic_status +Get the basic license status. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-license-get-basic-status[Endpoint documentation] +[source,ts] +---- +client.license.getBasicStatus() +---- + + +[discrete] +==== get_trial_status +Get the trial status. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-license-get-trial-status[Endpoint documentation] +[source,ts] +---- +client.license.getTrialStatus() +---- + + +[discrete] +==== post +Update the license. + +You can update your license at runtime without shutting down your nodes. +License updates take effect immediately. +If the license you are installing does not support all of the features that were available with your previous license, however, you are notified in the response. +You must then re-submit the API request with the acknowledge parameter set to true. + +NOTE: If Elasticsearch security features are enabled and you are installing a gold or higher license, you must enable TLS on the transport networking layer before you install the license. +If the operator privileges feature is enabled, only operator users can use this API. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-license-post[Endpoint documentation] +[source,ts] +---- +client.license.post({ ... 
}) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`license` (Optional, { expiry_date_in_millis, issue_date_in_millis, start_date_in_millis, issued_to, issuer, max_nodes, max_resource_units, signature, type, uid })* +** *`licenses` (Optional, { expiry_date_in_millis, issue_date_in_millis, start_date_in_millis, issued_to, issuer, max_nodes, max_resource_units, signature, type, uid }[])*: A sequence of one or more JSON documents containing the license information. +** *`acknowledge` (Optional, boolean)*: Specifies whether you acknowledge the license changes. +** *`master_timeout` (Optional, string | -1 | 0)*: The period to wait for a connection to the master node. +** *`timeout` (Optional, string | -1 | 0)*: The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. + +[discrete] +==== post_start_basic +Start a basic license. + +Start an indefinite basic license, which gives access to all the basic features. + +NOTE: In order to start a basic license, you must not currently have a basic license. + +If the basic license does not support all of the features that are available with your current license, however, you are notified in the response. +You must then re-submit the API request with the `acknowledge` parameter set to `true`. + +To check the status of your basic license, use the get basic license API. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-license-post-start-basic[Endpoint documentation] +[source,ts] +---- +client.license.postStartBasic({ ... }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`acknowledge` (Optional, boolean)*: whether the user has acknowledged acknowledge messages (default: false) +** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. +** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. + +[discrete] +==== post_start_trial +Start a trial. +Start a 30-day trial, which gives access to all subscription features. + +NOTE: You are allowed to start a trial only if your cluster has not already activated a trial for the current major product version. +For example, if you have already activated a trial for v8.0, you cannot start a new trial until v9.0. You can, however, request an extended trial at https://www.elastic.co/trialextension. + +To check the status of your trial, use the get trial status API. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-license-post-start-trial[Endpoint documentation] +[source,ts] +---- +client.license.postStartTrial({ ... }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`acknowledge` (Optional, boolean)*: whether the user has acknowledged acknowledge messages (default: false) +** *`type_query_string` (Optional, string)* +** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. + +[discrete] +=== logstash +[discrete] +==== delete_pipeline +Delete a Logstash pipeline. +Delete a pipeline that is used for Logstash Central Management. +If the request succeeds, you receive an empty response with an appropriate status code. 
+
+https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-logstash-delete-pipeline[Endpoint documentation]
+[source,ts]
+----
+client.logstash.deletePipeline({ id })
+----
+
+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`id` (string)*: An identifier for the pipeline.
+
+[discrete]
+==== get_pipeline
+Get Logstash pipelines.
+Get pipelines that are used for Logstash Central Management.
+
+https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-logstash-get-pipeline[Endpoint documentation]
+[source,ts]
+----
+client.logstash.getPipeline({ ... })
+----
+
+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`id` (Optional, string | string[])*: A list of pipeline identifiers.
+
+[discrete]
+==== put_pipeline
+Create or update a Logstash pipeline.
+
+Create a pipeline that is used for Logstash Central Management.
+If the specified pipeline exists, it is replaced.
+
+https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-logstash-put-pipeline[Endpoint documentation]
+[source,ts]
+----
+client.logstash.putPipeline({ id })
+----
+
+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`id` (string)*: An identifier for the pipeline.
+** *`pipeline` (Optional, { description, on_failure, processors, version, deprecated, _meta })*
+
+[discrete]
+=== migration
+[discrete]
+==== deprecations
+Get deprecation information.
+Get information about different cluster, node, and index level settings that use deprecated features that will be removed or changed in the next major version.
+
+TIP: This API is designed for indirect use by the Upgrade Assistant.
+We strongly recommend that you use the Upgrade Assistant.
+
+https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-migration-deprecations[Endpoint documentation]
+[source,ts]
+----
+client.migration.deprecations({ ... })
+----
+
+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`index` (Optional, string)*: Comma-separated list of data streams or indices to check. Wildcard (*) expressions are supported.
+
+[discrete]
+==== get_feature_upgrade_status
+Get feature migration information.
+Version upgrades sometimes require changes to how features store configuration information and data in system indices.
+Check which features need to be migrated and the status of any migrations that are in progress.
+
+TIP: This API is designed for indirect use by the Upgrade Assistant.
+We strongly recommend that you use the Upgrade Assistant.
+
+https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-migration-get-feature-upgrade-status[Endpoint documentation]
+[source,ts]
+----
+client.migration.getFeatureUpgradeStatus()
+----
+
+
+[discrete]
+==== post_feature_upgrade
+Start the feature migration.
+Version upgrades sometimes require changes to how features store configuration information and data in system indices.
+This API starts the automatic migration process.
+
+Some functionality might be temporarily unavailable during the migration process.
+
+TIP: The API is designed for indirect use by the Upgrade Assistant. We strongly recommend you use the Upgrade Assistant.
+
+https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-migration-get-feature-upgrade-status[Endpoint documentation]
+[source,ts]
+----
+client.migration.postFeatureUpgrade()
+----
+
+
+[discrete]
+=== ml
+[discrete]
+==== clear_trained_model_deployment_cache
+Clear trained model deployment cache.
+
+Cache will be cleared on all nodes where the trained model is assigned.
+A trained model deployment may have an inference cache enabled.
+As requests are handled by each allocated node, their responses may be cached on that individual node.
+Calling this API clears the caches without restarting the deployment.
+
+https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-clear-trained-model-deployment-cache[Endpoint documentation]
+[source,ts]
+----
+client.ml.clearTrainedModelDeploymentCache({ model_id })
+----
+
+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`model_id` (string)*: The unique identifier of the trained model.
+
+[discrete]
+==== close_job
+Close anomaly detection jobs.
+
+A job can be opened and closed multiple times throughout its lifecycle. A closed job cannot receive data or perform analysis operations, but you can still explore and navigate results.
+When you close a job, it runs housekeeping tasks such as pruning the model history, flushing buffers, calculating final results and persisting the model snapshots. Depending upon the size of the job, it could take several minutes to close and the equivalent time to re-open. After it is closed, the job has a minimal overhead on the cluster except for maintaining its metadata. Therefore it is a best practice to close jobs that are no longer required to process data.
+If you close an anomaly detection job whose datafeed is running, the request first tries to stop the datafeed. This behavior is equivalent to calling the stop datafeed API with the same timeout and force parameters as the close job request.
+When a datafeed that has a specified end date stops, it automatically closes its associated job.
+
+https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-close-job[Endpoint documentation]
+[source,ts]
+----
+client.ml.closeJob({ job_id })
+----
+
+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`job_id` (string)*: Identifier for the anomaly detection job. It can be a job identifier, a group name, or a wildcard expression. You can close multiple anomaly detection jobs in a single API request by using a group name, a list of jobs, or a wildcard expression. You can close all jobs by using `_all` or by specifying `*` as the job identifier.
+** *`allow_no_match` (Optional, boolean)*: Refer to the description for the `allow_no_match` query parameter.
+** *`force` (Optional, boolean)*: Refer to the description for the `force` query parameter.
+** *`timeout` (Optional, string | -1 | 0)*: Refer to the description for the `timeout` query parameter.
+
+[discrete]
+==== delete_calendar
+Delete a calendar.
+
+Remove all scheduled events from a calendar, then delete it.
+
+https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-delete-calendar[Endpoint documentation]
+[source,ts]
+----
+client.ml.deleteCalendar({ calendar_id })
+----
+
+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`calendar_id` (string)*: A string that uniquely identifies a calendar.
+
+[discrete]
+==== delete_calendar_event
+Delete events from a calendar.
+
+https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-delete-calendar-event[Endpoint documentation]
+[source,ts]
+----
+client.ml.deleteCalendarEvent({ calendar_id, event_id })
+----
+
+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`calendar_id` (string)*: A string that uniquely identifies a calendar.
+** *`event_id` (string)*: Identifier for the scheduled event.
+You can obtain this identifier by using the get calendar events API.
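+
+As a quick usage sketch (the calendar name `holidays` below is a placeholder, not an API value), you might look up an event identifier with the get calendar events API and then remove it:
+[source,ts]
+----
+// List the scheduled events for a calendar, then delete the first one.
+const events = await client.ml.getCalendarEvents({ calendar_id: 'holidays' })
+if (events.events.length > 0) {
+  await client.ml.deleteCalendarEvent({
+    calendar_id: 'holidays',
+    event_id: events.events[0].event_id
+  })
+}
+----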
+ +[discrete] +==== delete_calendar_job +Delete anomaly jobs from a calendar. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-delete-calendar-job[Endpoint documentation] +[source,ts] +---- +client.ml.deleteCalendarJob({ calendar_id, job_id }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`calendar_id` (string)*: A string that uniquely identifies a calendar. +** *`job_id` (string | string[])*: An identifier for the anomaly detection jobs. It can be a job identifier, a group name, or a +list of jobs or groups. + +[discrete] +==== delete_data_frame_analytics +Delete a data frame analytics job. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-delete-data-frame-analytics[Endpoint documentation] +[source,ts] +---- +client.ml.deleteDataFrameAnalytics({ id }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`id` (string)*: Identifier for the data frame analytics job. +** *`force` (Optional, boolean)*: If `true`, it deletes a job that is not stopped; this method is quicker than stopping and deleting the job. +** *`timeout` (Optional, string | -1 | 0)*: The time to wait for the job to be deleted. + +[discrete] +==== delete_datafeed +Delete a datafeed. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-delete-datafeed[Endpoint documentation] +[source,ts] +---- +client.ml.deleteDatafeed({ datafeed_id }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`datafeed_id` (string)*: A numerical character string that uniquely identifies the datafeed. This +identifier can contain lowercase alphanumeric characters (a-z and 0-9), +hyphens, and underscores. It must start and end with alphanumeric +characters. +** *`force` (Optional, boolean)*: Use to forcefully delete a started datafeed; this method is quicker than +stopping and deleting the datafeed. + +[discrete] +==== delete_expired_data +Delete expired ML data. + +Delete all job results, model snapshots and forecast data that have exceeded +their retention days period. Machine learning state documents that are not +associated with any job are also deleted. +You can limit the request to a single or set of anomaly detection jobs by +using a job identifier, a group name, a list of jobs, or a +wildcard expression. You can delete expired data for all anomaly detection +jobs by using `_all`, by specifying `*` as the ``, or by omitting the +``. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-delete-expired-data[Endpoint documentation] +[source,ts] +---- +client.ml.deleteExpiredData({ ... }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`job_id` (Optional, string)*: Identifier for an anomaly detection job. It can be a job identifier, a +group name, or a wildcard expression. +** *`requests_per_second` (Optional, float)*: The desired requests per second for the deletion processes. The default +behavior is no throttling. +** *`timeout` (Optional, string | -1 | 0)*: How long can the underlying delete processes run until they are canceled. + +[discrete] +==== delete_filter +Delete a filter. + +If an anomaly detection job references the filter, you cannot delete the +filter. You must update or delete the job before you can delete the filter. 
+ +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-delete-filter[Endpoint documentation] +[source,ts] +---- +client.ml.deleteFilter({ filter_id }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`filter_id` (string)*: A string that uniquely identifies a filter. + +[discrete] +==== delete_forecast +Delete forecasts from a job. + +By default, forecasts are retained for 14 days. You can specify a +different retention period with the `expires_in` parameter in the forecast +jobs API. The delete forecast API enables you to delete one or more +forecasts before they expire. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-delete-forecast[Endpoint documentation] +[source,ts] +---- +client.ml.deleteForecast({ job_id }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`job_id` (string)*: Identifier for the anomaly detection job. +** *`forecast_id` (Optional, string)*: A list of forecast identifiers. If you do not specify +this optional parameter or if you specify `_all` or `*` the API deletes +all forecasts from the job. +** *`allow_no_forecasts` (Optional, boolean)*: Specifies whether an error occurs when there are no forecasts. In +particular, if this parameter is set to `false` and there are no +forecasts associated with the job, attempts to delete all forecasts +return an error. +** *`timeout` (Optional, string | -1 | 0)*: Specifies the period of time to wait for the completion of the delete +operation. When this period of time elapses, the API fails and returns an +error. + +[discrete] +==== delete_job +Delete an anomaly detection job. + +All job configuration, model state and results are deleted. +It is not currently possible to delete multiple jobs using wildcards or a +comma separated list. If you delete a job that has a datafeed, the request +first tries to delete the datafeed. This behavior is equivalent to calling +the delete datafeed API with the same timeout and force parameters as the +delete job request. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-delete-job[Endpoint documentation] +[source,ts] +---- +client.ml.deleteJob({ job_id }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`job_id` (string)*: Identifier for the anomaly detection job. +** *`force` (Optional, boolean)*: Use to forcefully delete an opened job; this method is quicker than +closing and deleting the job. +** *`delete_user_annotations` (Optional, boolean)*: Specifies whether annotations that have been added by the +user should be deleted along with any auto-generated annotations when the job is +reset. +** *`wait_for_completion` (Optional, boolean)*: Specifies whether the request should return immediately or wait until the +job deletion completes. + +[discrete] +==== delete_model_snapshot +Delete a model snapshot. + +You cannot delete the active model snapshot. To delete that snapshot, first +revert to a different one. To identify the active model snapshot, refer to +the `model_snapshot_id` in the results from the get jobs API. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-delete-model-snapshot[Endpoint documentation] +[source,ts] +---- +client.ml.deleteModelSnapshot({ job_id, snapshot_id }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`job_id` (string)*: Identifier for the anomaly detection job. +** *`snapshot_id` (string)*: Identifier for the model snapshot. 
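+
+As a rough sketch, this can be combined with the get jobs and get model snapshots APIs to remove every snapshot except the active one (the job name `my-job` is a placeholder):
+[source,ts]
+----
+// The active snapshot is reported as model_snapshot_id on the job configuration
+// once the job has persisted a snapshot.
+const { jobs } = await client.ml.getJobs({ job_id: 'my-job' })
+const activeSnapshot = jobs[0].model_snapshot_id
+
+const { model_snapshots } = await client.ml.getModelSnapshots({ job_id: 'my-job' })
+for (const snapshot of model_snapshots) {
+  if (snapshot.snapshot_id !== activeSnapshot) {
+    await client.ml.deleteModelSnapshot({ job_id: 'my-job', snapshot_id: snapshot.snapshot_id })
+  }
+}
+----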
+ +[discrete] +==== delete_trained_model +Delete an unreferenced trained model. + +The request deletes a trained inference model that is not referenced by an ingest pipeline. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-delete-trained-model[Endpoint documentation] +[source,ts] +---- +client.ml.deleteTrainedModel({ model_id }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`model_id` (string)*: The unique identifier of the trained model. +** *`force` (Optional, boolean)*: Forcefully deletes a trained model that is referenced by ingest pipelines or has a started deployment. +** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. + +[discrete] +==== delete_trained_model_alias +Delete a trained model alias. + +This API deletes an existing model alias that refers to a trained model. If +the model alias is missing or refers to a model other than the one identified +by the `model_id`, this API returns an error. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-delete-trained-model-alias[Endpoint documentation] +[source,ts] +---- +client.ml.deleteTrainedModelAlias({ model_alias, model_id }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`model_alias` (string)*: The model alias to delete. +** *`model_id` (string)*: The trained model ID to which the model alias refers. + +[discrete] +==== estimate_model_memory +Estimate job model memory usage. + +Make an estimation of the memory usage for an anomaly detection job model. +The estimate is based on analysis configuration details for the job and cardinality +estimates for the fields it references. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-estimate-model-memory[Endpoint documentation] +[source,ts] +---- +client.ml.estimateModelMemory({ ... }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`analysis_config` (Optional, { bucket_span, categorization_analyzer, categorization_field_name, categorization_filters, detectors, influencers, latency, model_prune_window, multivariate_by_fields, per_partition_categorization, summary_count_field_name })*: For a list of the properties that you can specify in the +`analysis_config` component of the body of this API. +** *`max_bucket_cardinality` (Optional, Record)*: Estimates of the highest cardinality in a single bucket that is observed +for influencer fields over the time period that the job analyzes data. +To produce a good answer, values must be provided for all influencer +fields. Providing values for fields that are not listed as `influencers` +has no effect on the estimation. +** *`overall_cardinality` (Optional, Record)*: Estimates of the cardinality that is observed for fields over the whole +time period that the job analyzes data. To produce a good answer, values +must be provided for fields referenced in the `by_field_name`, +`over_field_name` and `partition_field_name` of any detectors. Providing +values for other fields has no effect on the estimation. It can be +omitted from the request if no detectors have a `by_field_name`, +`over_field_name` or `partition_field_name`. + +[discrete] +==== evaluate_data_frame +Evaluate data frame analytics. + +The API packages together commonly used evaluation metrics for various types +of machine learning features. This has been designed for use on indexes +created by data frame analytics. 
Evaluation requires both a ground truth +field and an analytics result field to be present. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-evaluate-data-frame[Endpoint documentation] +[source,ts] +---- +client.ml.evaluateDataFrame({ evaluation, index }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`evaluation` ({ classification, outlier_detection, regression })*: Defines the type of evaluation you want to perform. +** *`index` (string)*: Defines the `index` in which the evaluation will be performed. +** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: A query clause that retrieves a subset of data from the source index. + +[discrete] +==== explain_data_frame_analytics +Explain data frame analytics config. + +This API provides explanations for a data frame analytics config that either +exists already or one that has not been created yet. The following +explanations are provided: +* which fields are included or not in the analysis and why, +* how much memory is estimated to be required. The estimate can be used when deciding the appropriate value for model_memory_limit setting later on. +If you have object fields or fields that are excluded via source filtering, they are not included in the explanation. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-explain-data-frame-analytics[Endpoint documentation] +[source,ts] +---- +client.ml.explainDataFrameAnalytics({ ... }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`id` (Optional, string)*: Identifier for the data frame analytics job. This identifier can contain +lowercase alphanumeric characters (a-z and 0-9), hyphens, and +underscores. It must start and end with alphanumeric characters. +** *`source` (Optional, { index, query, runtime_mappings, _source })*: The configuration of how to source the analysis data. It requires an +index. Optionally, query and _source may be specified. +** *`dest` (Optional, { index, results_field })*: The destination configuration, consisting of index and optionally +results_field (ml by default). +** *`analysis` (Optional, { classification, outlier_detection, regression })*: The analysis configuration, which contains the information necessary to +perform one of the following types of analysis: classification, outlier +detection, or regression. +** *`description` (Optional, string)*: A description of the job. +** *`model_memory_limit` (Optional, string)*: The approximate maximum amount of memory resources that are permitted for +analytical processing. If your `elasticsearch.yml` file contains an +`xpack.ml.max_model_memory_limit` setting, an error occurs when you try to +create data frame analytics jobs that have `model_memory_limit` values +greater than that setting. 
+** *`max_num_threads` (Optional, number)*: The maximum number of threads to be used by the analysis. Using more
+threads may decrease the time necessary to complete the analysis at the
+cost of using more CPU. Note that the process may use additional threads
+for operational functionality other than the analysis itself.
+** *`analyzed_fields` (Optional, { includes, excludes })*: Specify includes and/or excludes patterns to select which fields will be
+included in the analysis. The patterns specified in excludes are applied
+last, therefore excludes takes precedence. In other words, if the same
+field is specified in both includes and excludes, then the field will not
+be included in the analysis.
+** *`allow_lazy_start` (Optional, boolean)*: Specifies whether this job can start when there is insufficient machine
+learning node capacity for it to be immediately assigned to a node.
+
+[discrete]
+==== flush_job
+Force buffered data to be processed.
+The flush jobs API is only applicable when sending data for analysis using
+the post data API. Depending on the content of the buffer, it might
+additionally calculate new results. Both flush and close operations are
+similar; however, the flush is more efficient if you are expecting to send
+more data for analysis. When flushing, the job remains open and is available
+to continue analyzing data. A close operation additionally prunes and
+persists the model state to disk and the job must be opened again before
+analyzing further data.
+
+https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-flush-job[Endpoint documentation]
+[source,ts]
+----
+client.ml.flushJob({ job_id })
+----
+
+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`job_id` (string)*: Identifier for the anomaly detection job.
+** *`advance_time` (Optional, string | Unit)*: Refer to the description for the `advance_time` query parameter.
+** *`calc_interim` (Optional, boolean)*: Refer to the description for the `calc_interim` query parameter.
+** *`end` (Optional, string | Unit)*: Refer to the description for the `end` query parameter.
+** *`skip_time` (Optional, string | Unit)*: Refer to the description for the `skip_time` query parameter.
+** *`start` (Optional, string | Unit)*: Refer to the description for the `start` query parameter.
+
+[discrete]
+==== forecast
+Predict future behavior of a time series.
+
+Forecasts are not supported for jobs that perform population analysis; an
+error occurs if you try to create a forecast for a job that has an
+`over_field_name` in its configuration. Forecasts predict future behavior
+based on historical data.
+
+https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-forecast[Endpoint documentation]
+[source,ts]
+----
+client.ml.forecast({ job_id })
+----
+
+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`job_id` (string)*: Identifier for the anomaly detection job. The job must be open when you
+create a forecast; otherwise, an error occurs.
+** *`duration` (Optional, string | -1 | 0)*: Refer to the description for the `duration` query parameter.
+** *`expires_in` (Optional, string | -1 | 0)*: Refer to the description for the `expires_in` query parameter.
+** *`max_model_memory` (Optional, string)*: Refer to the description for the `max_model_memory` query parameter.
+
+[discrete]
+==== get_buckets
+Get anomaly detection job results for buckets.
+The API presents a chronological view of the records, grouped by bucket.
+
+https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-buckets[Endpoint documentation]
+[source,ts]
+----
+client.ml.getBuckets({ job_id })
+----
+
+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`job_id` (string)*: Identifier for the anomaly detection job.
+** *`timestamp` (Optional, string | Unit)*: The timestamp of a single bucket result. If you do not specify this
+parameter, the API returns information about all buckets.
+** *`anomaly_score` (Optional, number)*: Refer to the description for the `anomaly_score` query parameter.
+** *`desc` (Optional, boolean)*: Refer to the description for the `desc` query parameter.
+** *`end` (Optional, string | Unit)*: Refer to the description for the `end` query parameter.
+** *`exclude_interim` (Optional, boolean)*: Refer to the description for the `exclude_interim` query parameter.
+** *`expand` (Optional, boolean)*: Refer to the description for the `expand` query parameter.
+** *`page` (Optional, { from, size })*
+** *`sort` (Optional, string)*: Refer to the description for the `sort` query parameter.
+** *`start` (Optional, string | Unit)*: Refer to the description for the `start` query parameter.
+** *`from` (Optional, number)*: Skips the specified number of buckets.
+** *`size` (Optional, number)*: Specifies the maximum number of buckets to obtain.
+
+[discrete]
+==== get_calendar_events
+Get info about events in calendars.
+
+https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-calendar-events[Endpoint documentation]
+[source,ts]
+----
+client.ml.getCalendarEvents({ calendar_id })
+----
+
+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`calendar_id` (string)*: A string that uniquely identifies a calendar. You can get information for multiple calendars by using a list of ids or a wildcard expression. You can get information for all calendars by using `_all` or `*` or by omitting the calendar identifier.
+** *`end` (Optional, string | Unit)*: Specifies to get events with timestamps earlier than this time.
+** *`from` (Optional, number)*: Skips the specified number of events.
+** *`job_id` (Optional, string)*: Specifies to get events for a specific anomaly detection job identifier or job group. It must be used with a calendar identifier of `_all` or `*`.
+** *`size` (Optional, number)*: Specifies the maximum number of events to obtain.
+** *`start` (Optional, string | Unit)*: Specifies to get events with timestamps after this time.
+
+[discrete]
+==== get_calendars
+Get calendar configuration info.
+
+https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-calendars[Endpoint documentation]
+[source,ts]
+----
+client.ml.getCalendars({ ... })
+----
+
+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`calendar_id` (Optional, string)*: A string that uniquely identifies a calendar. You can get information for multiple calendars by using a list of ids or a wildcard expression. You can get information for all calendars by using `_all` or `*` or by omitting the calendar identifier.
+** *`page` (Optional, { from, size })*: This object is supported only when you omit the calendar identifier.
+** *`from` (Optional, number)*: Skips the specified number of calendars. This parameter is supported only when you omit the calendar identifier.
+** *`size` (Optional, number)*: Specifies the maximum number of calendars to obtain. This parameter is supported only when you omit the calendar identifier.
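+
+For example, a minimal sketch that pages through all calendars with `from` and `size` (the page size of 50 is an arbitrary choice for illustration):
+[source,ts]
+----
+// Page through every calendar; `count` is the total number of matching calendars.
+const pageSize = 50
+for (let from = 0; ; from += pageSize) {
+  const { calendars, count } = await client.ml.getCalendars({ from, size: pageSize })
+  for (const calendar of calendars) {
+    console.log(calendar.calendar_id)
+  }
+  if (from + pageSize >= count) break
+}
+----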
+ +[discrete] +==== get_categories +Get anomaly detection job results for categories. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-categories[Endpoint documentation] +[source,ts] +---- +client.ml.getCategories({ job_id }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`job_id` (string)*: Identifier for the anomaly detection job. +** *`category_id` (Optional, string)*: Identifier for the category, which is unique in the job. If you specify +neither the category ID nor the partition_field_value, the API returns +information about all categories. If you specify only the +partition_field_value, it returns information about all categories for +the specified partition. +** *`page` (Optional, { from, size })*: Configures pagination. +This parameter has the `from` and `size` properties. +** *`from` (Optional, number)*: Skips the specified number of categories. +** *`partition_field_value` (Optional, string)*: Only return categories for the specified partition. +** *`size` (Optional, number)*: Specifies the maximum number of categories to obtain. + +[discrete] +==== get_data_frame_analytics +Get data frame analytics job configuration info. +You can get information for multiple data frame analytics jobs in a single +API request by using a list of data frame analytics jobs or a +wildcard expression. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-data-frame-analytics[Endpoint documentation] +[source,ts] +---- +client.ml.getDataFrameAnalytics({ ... }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`id` (Optional, string)*: Identifier for the data frame analytics job. If you do not specify this +option, the API returns information for the first hundred data frame +analytics jobs. +** *`allow_no_match` (Optional, boolean)*: Specifies what to do when the request: + +. Contains wildcard expressions and there are no data frame analytics +jobs that match. +. Contains the `_all` string or no identifiers and there are no matches. +. Contains wildcard expressions and there are only partial matches. + +The default value returns an empty data_frame_analytics array when there +are no matches and the subset of results when there are partial matches. +If this parameter is `false`, the request returns a 404 status code when +there are no matches or only partial matches. +** *`from` (Optional, number)*: Skips the specified number of data frame analytics jobs. +** *`size` (Optional, number)*: Specifies the maximum number of data frame analytics jobs to obtain. +** *`exclude_generated` (Optional, boolean)*: Indicates if certain fields should be removed from the configuration on +retrieval. This allows the configuration to be in an acceptable format to +be retrieved and then added to another cluster. + +[discrete] +==== get_data_frame_analytics_stats +Get data frame analytics jobs usage info. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-data-frame-analytics-stats[Endpoint documentation] +[source,ts] +---- +client.ml.getDataFrameAnalyticsStats({ ... }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`id` (Optional, string)*: Identifier for the data frame analytics job. If you do not specify this +option, the API returns information for the first hundred data frame +analytics jobs. +** *`allow_no_match` (Optional, boolean)*: Specifies what to do when the request: + +. Contains wildcard expressions and there are no data frame analytics +jobs that match. +. 
Contains the `_all` string or no identifiers and there are no matches. +. Contains wildcard expressions and there are only partial matches. + +The default value returns an empty data_frame_analytics array when there +are no matches and the subset of results when there are partial matches. +If this parameter is `false`, the request returns a 404 status code when +there are no matches or only partial matches. +** *`from` (Optional, number)*: Skips the specified number of data frame analytics jobs. +** *`size` (Optional, number)*: Specifies the maximum number of data frame analytics jobs to obtain. +** *`verbose` (Optional, boolean)*: Defines whether the stats response should be verbose. + +[discrete] +==== get_datafeed_stats +Get datafeeds usage info. +You can get statistics for multiple datafeeds in a single API request by +using a list of datafeeds or a wildcard expression. You can +get statistics for all datafeeds by using `_all`, by specifying `*` as the +``, or by omitting the ``. If the datafeed is stopped, the +only information you receive is the `datafeed_id` and the `state`. +This API returns a maximum of 10,000 datafeeds. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-datafeed-stats[Endpoint documentation] +[source,ts] +---- +client.ml.getDatafeedStats({ ... }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`datafeed_id` (Optional, string | string[])*: Identifier for the datafeed. It can be a datafeed identifier or a +wildcard expression. If you do not specify one of these options, the API +returns information about all datafeeds. +** *`allow_no_match` (Optional, boolean)*: Specifies what to do when the request: + +. Contains wildcard expressions and there are no datafeeds that match. +. Contains the `_all` string or no identifiers and there are no matches. +. Contains wildcard expressions and there are only partial matches. + +The default value is `true`, which returns an empty `datafeeds` array +when there are no matches and the subset of results when there are +partial matches. If this parameter is `false`, the request returns a +`404` status code when there are no matches or only partial matches. + +[discrete] +==== get_datafeeds +Get datafeeds configuration info. +You can get information for multiple datafeeds in a single API request by +using a list of datafeeds or a wildcard expression. You can +get information for all datafeeds by using `_all`, by specifying `*` as the +``, or by omitting the ``. +This API returns a maximum of 10,000 datafeeds. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-datafeeds[Endpoint documentation] +[source,ts] +---- +client.ml.getDatafeeds({ ... }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`datafeed_id` (Optional, string | string[])*: Identifier for the datafeed. It can be a datafeed identifier or a +wildcard expression. If you do not specify one of these options, the API +returns information about all datafeeds. +** *`allow_no_match` (Optional, boolean)*: Specifies what to do when the request: + +. Contains wildcard expressions and there are no datafeeds that match. +. Contains the `_all` string or no identifiers and there are no matches. +. Contains wildcard expressions and there are only partial matches. + +The default value is `true`, which returns an empty `datafeeds` array +when there are no matches and the subset of results when there are +partial matches. 
If this parameter is `false`, the request returns a +`404` status code when there are no matches or only partial matches. +** *`exclude_generated` (Optional, boolean)*: Indicates if certain fields should be removed from the configuration on +retrieval. This allows the configuration to be in an acceptable format to +be retrieved and then added to another cluster. + +[discrete] +==== get_filters +Get filters. +You can get a single filter or all filters. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-filters[Endpoint documentation] +[source,ts] +---- +client.ml.getFilters({ ... }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`filter_id` (Optional, string | string[])*: A string that uniquely identifies a filter. +** *`from` (Optional, number)*: Skips the specified number of filters. +** *`size` (Optional, number)*: Specifies the maximum number of filters to obtain. + +[discrete] +==== get_influencers +Get anomaly detection job results for influencers. +Influencers are the entities that have contributed to, or are to blame for, +the anomalies. Influencer results are available only if an +`influencer_field_name` is specified in the job configuration. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-influencers[Endpoint documentation] +[source,ts] +---- +client.ml.getInfluencers({ job_id }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`job_id` (string)*: Identifier for the anomaly detection job. +** *`page` (Optional, { from, size })*: Configures pagination. +This parameter has the `from` and `size` properties. +** *`desc` (Optional, boolean)*: If true, the results are sorted in descending order. +** *`end` (Optional, string | Unit)*: Returns influencers with timestamps earlier than this time. +The default value means it is unset and results are not limited to +specific timestamps. +** *`exclude_interim` (Optional, boolean)*: If true, the output excludes interim results. By default, interim results +are included. +** *`influencer_score` (Optional, number)*: Returns influencers with anomaly scores greater than or equal to this +value. +** *`from` (Optional, number)*: Skips the specified number of influencers. +** *`size` (Optional, number)*: Specifies the maximum number of influencers to obtain. +** *`sort` (Optional, string)*: Specifies the sort field for the requested influencers. By default, the +influencers are sorted by the `influencer_score` value. +** *`start` (Optional, string | Unit)*: Returns influencers with timestamps after this time. The default value +means it is unset and results are not limited to specific timestamps. + +[discrete] +==== get_job_stats +Get anomaly detection jobs usage info. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-job-stats[Endpoint documentation] +[source,ts] +---- +client.ml.getJobStats({ ... }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`job_id` (Optional, string)*: Identifier for the anomaly detection job. It can be a job identifier, a +group name, a list of jobs, or a wildcard expression. If +you do not specify one of these options, the API returns information for +all anomaly detection jobs. +** *`allow_no_match` (Optional, boolean)*: Specifies what to do when the request: + +. Contains wildcard expressions and there are no jobs that match. +. Contains the _all string or no identifiers and there are no matches. +. Contains wildcard expressions and there are only partial matches. 
+ +If `true`, the API returns an empty `jobs` array when +there are no matches and the subset of results when there are partial +matches. If `false`, the API returns a `404` status +code when there are no matches or only partial matches. + +[discrete] +==== get_jobs +Get anomaly detection jobs configuration info. +You can get information for multiple anomaly detection jobs in a single API +request by using a group name, a list of jobs, or a wildcard +expression. You can get information for all anomaly detection jobs by using +`_all`, by specifying `*` as the ``, or by omitting the ``. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-jobs[Endpoint documentation] +[source,ts] +---- +client.ml.getJobs({ ... }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`job_id` (Optional, string | string[])*: Identifier for the anomaly detection job. It can be a job identifier, a +group name, or a wildcard expression. If you do not specify one of these +options, the API returns information for all anomaly detection jobs. +** *`allow_no_match` (Optional, boolean)*: Specifies what to do when the request: + +. Contains wildcard expressions and there are no jobs that match. +. Contains the _all string or no identifiers and there are no matches. +. Contains wildcard expressions and there are only partial matches. + +The default value is `true`, which returns an empty `jobs` array when +there are no matches and the subset of results when there are partial +matches. If this parameter is `false`, the request returns a `404` status +code when there are no matches or only partial matches. +** *`exclude_generated` (Optional, boolean)*: Indicates if certain fields should be removed from the configuration on +retrieval. This allows the configuration to be in an acceptable format to +be retrieved and then added to another cluster. + +[discrete] +==== get_memory_stats +Get machine learning memory usage info. +Get information about how machine learning jobs and trained models are using memory, +on each node, both within the JVM heap, and natively, outside of the JVM. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-memory-stats[Endpoint documentation] +[source,ts] +---- +client.ml.getMemoryStats({ ... }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`node_id` (Optional, string)*: The names of particular nodes in the cluster to target. For example, `nodeId1,nodeId2` or +`ml:true` +** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. If no response is received before the timeout +expires, the request fails and returns an error. +** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. If no response is received before the timeout expires, the request +fails and returns an error. + +[discrete] +==== get_model_snapshot_upgrade_stats +Get anomaly detection job model snapshot upgrade usage info. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-model-snapshot-upgrade-stats[Endpoint documentation] +[source,ts] +---- +client.ml.getModelSnapshotUpgradeStats({ job_id, snapshot_id }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`job_id` (string)*: Identifier for the anomaly detection job. +** *`snapshot_id` (string)*: A numerical character string that uniquely identifies the model snapshot. You can get information for multiple +snapshots by using a list or a wildcard expression. 
You can get all snapshots by using `_all`,
+by specifying `*` as the snapshot ID, or by omitting the snapshot ID.
+** *`allow_no_match` (Optional, boolean)*: Specifies what to do when the request:
+
+ - Contains wildcard expressions and there are no jobs that match.
+ - Contains the _all string or no identifiers and there are no matches.
+ - Contains wildcard expressions and there are only partial matches.
+
+The default value is true, which returns an empty jobs array when there are no matches and the subset of results
+when there are partial matches. If this parameter is false, the request returns a 404 status code when there are
+no matches or only partial matches.
+
+[discrete]
+==== get_model_snapshots
+Get model snapshots info.
+
+https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-model-snapshots[Endpoint documentation]
+[source,ts]
+----
+client.ml.getModelSnapshots({ job_id })
+----
+
+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`job_id` (string)*: Identifier for the anomaly detection job.
+** *`snapshot_id` (Optional, string)*: A numerical character string that uniquely identifies the model snapshot. You can get information for multiple
+snapshots by using a list or a wildcard expression. You can get all snapshots by using `_all`,
+by specifying `*` as the snapshot ID, or by omitting the snapshot ID.
+** *`desc` (Optional, boolean)*: Refer to the description for the `desc` query parameter.
+** *`end` (Optional, string | Unit)*: Refer to the description for the `end` query parameter.
+** *`page` (Optional, { from, size })*
+** *`sort` (Optional, string)*: Refer to the description for the `sort` query parameter.
+** *`start` (Optional, string | Unit)*: Refer to the description for the `start` query parameter.
+** *`from` (Optional, number)*: Skips the specified number of snapshots.
+** *`size` (Optional, number)*: Specifies the maximum number of snapshots to obtain.
+
+[discrete]
+==== get_overall_buckets
+Get overall bucket results.
+
+Retrieves overall bucket results that summarize the bucket results of
+multiple anomaly detection jobs.
+
+The `overall_score` is calculated by combining the scores of all the
+buckets within the overall bucket span. First, the maximum
+`anomaly_score` per anomaly detection job in the overall bucket is
+calculated. Then the `top_n` of those scores are averaged to result in
+the `overall_score`. This means that you can fine-tune the
+`overall_score` so that it is more or less sensitive to the number of
+jobs that detect an anomaly at the same time. For example, if you set
+`top_n` to `1`, the `overall_score` is the maximum bucket score in the
+overall bucket. Alternatively, if you set `top_n` to the number of jobs,
+the `overall_score` is high only when all jobs detect anomalies in that
+overall bucket. If you set the `bucket_span` parameter (to a value
+greater than its default), the `overall_score` is the maximum
+`overall_score` of the overall buckets that have a span equal to the
+jobs' largest bucket span.
+
+https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-overall-buckets[Endpoint documentation]
+[source,ts]
+----
+client.ml.getOverallBuckets({ job_id })
+----
+
+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`job_id` (string)*: Identifier for the anomaly detection job. It can be a job identifier, a
+group name, a list of jobs or groups, or a wildcard
+expression.
+ +You can summarize the bucket results for all anomaly detection jobs by +using `_all` or by specifying `*` as the ``. +** *`allow_no_match` (Optional, boolean)*: Refer to the description for the `allow_no_match` query parameter. +** *`bucket_span` (Optional, string | -1 | 0)*: Refer to the description for the `bucket_span` query parameter. +** *`end` (Optional, string | Unit)*: Refer to the description for the `end` query parameter. +** *`exclude_interim` (Optional, boolean)*: Refer to the description for the `exclude_interim` query parameter. +** *`overall_score` (Optional, number | string)*: Refer to the description for the `overall_score` query parameter. +** *`start` (Optional, string | Unit)*: Refer to the description for the `start` query parameter. +** *`top_n` (Optional, number)*: Refer to the description for the `top_n` query parameter. + +[discrete] +==== get_records +Get anomaly records for an anomaly detection job. +Records contain the detailed analytical results. They describe the anomalous +activity that has been identified in the input data based on the detector +configuration. +There can be many anomaly records depending on the characteristics and size +of the input data. In practice, there are often too many to be able to +manually process them. The machine learning features therefore perform a +sophisticated aggregation of the anomaly records into buckets. +The number of record results depends on the number of anomalies found in each +bucket, which relates to the number of time series being modeled and the +number of detectors. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-records[Endpoint documentation] +[source,ts] +---- +client.ml.getRecords({ job_id }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`job_id` (string)*: Identifier for the anomaly detection job. +** *`desc` (Optional, boolean)*: Refer to the description for the `desc` query parameter. +** *`end` (Optional, string | Unit)*: Refer to the description for the `end` query parameter. +** *`exclude_interim` (Optional, boolean)*: Refer to the description for the `exclude_interim` query parameter. +** *`page` (Optional, { from, size })* +** *`record_score` (Optional, number)*: Refer to the description for the `record_score` query parameter. +** *`sort` (Optional, string)*: Refer to the description for the `sort` query parameter. +** *`start` (Optional, string | Unit)*: Refer to the description for the `start` query parameter. +** *`from` (Optional, number)*: Skips the specified number of records. +** *`size` (Optional, number)*: Specifies the maximum number of records to obtain. + +[discrete] +==== get_trained_models +Get trained model configuration info. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-trained-models[Endpoint documentation] +[source,ts] +---- +client.ml.getTrainedModels({ ... }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`model_id` (Optional, string | string[])*: The unique identifier of the trained model or a model alias. + +You can get information for multiple trained models in a single API +request by using a list of model IDs or a wildcard +expression. +** *`allow_no_match` (Optional, boolean)*: Specifies what to do when the request: + +- Contains wildcard expressions and there are no models that match. +- Contains the _all string or no identifiers and there are no matches. +- Contains wildcard expressions and there are only partial matches. 
+ +If true, it returns an empty array when there are no matches and the +subset of results when there are partial matches. +** *`decompress_definition` (Optional, boolean)*: Specifies whether the included model definition should be returned as a +JSON map (true) or in a custom compressed format (false). +** *`exclude_generated` (Optional, boolean)*: Indicates if certain fields should be removed from the configuration on +retrieval. This allows the configuration to be in an acceptable format to +be retrieved and then added to another cluster. +** *`from` (Optional, number)*: Skips the specified number of models. +** *`include` (Optional, Enum("definition" | "feature_importance_baseline" | "hyperparameters" | "total_feature_importance" | "definition_status"))*: A comma delimited string of optional fields to include in the response +body. +** *`size` (Optional, number)*: Specifies the maximum number of models to obtain. +** *`tags` (Optional, string | string[])*: A comma delimited string of tags. A trained model can have many tags, or +none. When supplied, only trained models that contain all the supplied +tags are returned. + +[discrete] +==== get_trained_models_stats +Get trained models usage info. +You can get usage information for multiple trained +models in a single API request by using a list of model IDs or a wildcard expression. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-trained-models-stats[Endpoint documentation] +[source,ts] +---- +client.ml.getTrainedModelsStats({ ... }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`model_id` (Optional, string | string[])*: The unique identifier of the trained model or a model alias. It can be a +list or a wildcard expression. +** *`allow_no_match` (Optional, boolean)*: Specifies what to do when the request: + +- Contains wildcard expressions and there are no models that match. +- Contains the _all string or no identifiers and there are no matches. +- Contains wildcard expressions and there are only partial matches. + +If true, it returns an empty array when there are no matches and the +subset of results when there are partial matches. +** *`from` (Optional, number)*: Skips the specified number of models. +** *`size` (Optional, number)*: Specifies the maximum number of models to obtain. + +[discrete] +==== infer_trained_model +Evaluate a trained model. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-infer-trained-model[Endpoint documentation] +[source,ts] +---- +client.ml.inferTrainedModel({ model_id, docs }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`model_id` (string)*: The unique identifier of the trained model. +** *`docs` (Record[])*: An array of objects to pass to the model for inference. The objects should contain a field matching your +configured trained model input. Typically, for NLP models, the field name is `text_field`. +Currently, for NLP models, only a single value is allowed. +** *`inference_config` (Optional, { regression, classification, text_classification, zero_shot_classification, fill_mask, ner, pass_through, text_embedding, text_expansion, question_answering })*: The inference configuration updates to apply on the API call +** *`timeout` (Optional, string | -1 | 0)*: Controls the amount of time to wait for inference results. + +[discrete] +==== info +Get machine learning information. +Get defaults and limits used by machine learning.
+This endpoint is designed to be used by a user interface that needs to fully +understand machine learning configurations where some options are not +specified, meaning that the defaults should be used. This endpoint may be +used to find out what those defaults are. It also provides information about +the maximum size of machine learning jobs that could run in the current +cluster configuration. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-info[Endpoint documentation] +[source,ts] +---- +client.ml.info() +---- + + +[discrete] +==== open_job +Open anomaly detection jobs. + +An anomaly detection job must be opened to be ready to receive and analyze +data. It can be opened and closed multiple times throughout its lifecycle. +When you open a new job, it starts with an empty model. +When you open an existing job, the most recent model state is automatically +loaded. The job is ready to resume its analysis from where it left off, once +new data is received. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-open-job[Endpoint documentation] +[source,ts] +---- +client.ml.openJob({ job_id }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`job_id` (string)*: Identifier for the anomaly detection job. +** *`timeout` (Optional, string | -1 | 0)*: Refer to the description for the `timeout` query parameter. + +[discrete] +==== post_calendar_events +Add scheduled events to the calendar. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-post-calendar-events[Endpoint documentation] +[source,ts] +---- +client.ml.postCalendarEvents({ calendar_id, events }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`calendar_id` (string)*: A string that uniquely identifies a calendar. +** *`events` ({ calendar_id, event_id, description, end_time, start_time, skip_result, skip_model_update, force_time_shift }[])*: A list of one or more scheduled events. The event’s start and end times can be specified as integer milliseconds since the epoch or as a string in ISO 8601 format. + +[discrete] +==== post_data +Send data to an anomaly detection job for analysis. + +IMPORTANT: For each job, data can be accepted from only a single connection at a time. +It is not currently possible to post data to multiple jobs using wildcards or a list. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-post-data[Endpoint documentation] +[source,ts] +---- +client.ml.postData({ job_id }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`job_id` (string)*: Identifier for the anomaly detection job. The job must have a state of open to receive and process the data. +** *`data` (Optional, TData[])* +** *`reset_end` (Optional, string | Unit)*: Specifies the end of the bucket resetting range. +** *`reset_start` (Optional, string | Unit)*: Specifies the start of the bucket resetting range. + +[discrete] +==== preview_data_frame_analytics +Preview features used by data frame analytics. +Preview the extracted features used by a data frame analytics config. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-preview-data-frame-analytics[Endpoint documentation] +[source,ts] +---- +client.ml.previewDataFrameAnalytics({ ... }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`id` (Optional, string)*: Identifier for the data frame analytics job.
+** *`config` (Optional, { source, analysis, model_memory_limit, max_num_threads, analyzed_fields })*: A data frame analytics config as described in create data frame analytics +jobs. Note that `id` and `dest` don’t need to be provided in the context of +this API. + +[discrete] +==== preview_datafeed +Preview a datafeed. +This API returns the first "page" of search results from a datafeed. +You can preview an existing datafeed or provide configuration details for a datafeed +and anomaly detection job in the API. The preview shows the structure of the data +that will be passed to the anomaly detection engine. +IMPORTANT: When Elasticsearch security features are enabled, the preview uses the credentials of the user that +called the API. However, when the datafeed starts it uses the roles of the last user that created or updated the +datafeed. To get a preview that accurately reflects the behavior of the datafeed, use the appropriate credentials. +You can also use secondary authorization headers to supply the credentials. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-preview-datafeed[Endpoint documentation] +[source,ts] +---- +client.ml.previewDatafeed({ ... }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`datafeed_id` (Optional, string)*: A numerical character string that uniquely identifies the datafeed. This identifier can contain lowercase +alphanumeric characters (a-z and 0-9), hyphens, and underscores. It must start and end with alphanumeric +characters. NOTE: If you use this path parameter, you cannot provide datafeed or anomaly detection job +configuration details in the request body. +** *`datafeed_config` (Optional, { aggregations, chunking_config, datafeed_id, delayed_data_check_config, frequency, indices, indices_options, job_id, max_empty_searches, query, query_delay, runtime_mappings, script_fields, scroll_size })*: The datafeed definition to preview. +** *`job_config` (Optional, { allow_lazy_open, analysis_config, analysis_limits, background_persist_interval, custom_settings, daily_model_snapshot_retention_after_days, data_description, datafeed_config, description, groups, job_id, job_type, model_plot_config, model_snapshot_retention_days, renormalization_window_days, results_index_name, results_retention_days })*: The configuration details for the anomaly detection job that is associated with the datafeed. If the +`datafeed_config` object does not include a `job_id` that references an existing anomaly detection job, you must +supply this `job_config` object. If you include both a `job_id` and a `job_config`, the latter information is +used. You cannot specify a `job_config` object unless you also supply a `datafeed_config` object. +** *`start` (Optional, string | Unit)*: The start time from where the datafeed preview should begin +** *`end` (Optional, string | Unit)*: The end time when the datafeed preview should stop + +[discrete] +==== put_calendar +Create a calendar. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-put-calendar[Endpoint documentation] +[source,ts] +---- +client.ml.putCalendar({ calendar_id }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`calendar_id` (string)*: A string that uniquely identifies a calendar. +** *`job_ids` (Optional, string[])*: An array of anomaly detection job identifiers. +** *`description` (Optional, string)*: A description of the calendar. + +[discrete] +==== put_calendar_job +Add anomaly detection job to calendar. 
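+
+As a minimal sketch, a calendar is usually created first and an existing anomaly detection job is then attached to it; the calendar and job identifiers below are hypothetical:
+
+[source,ts]
+----
+// create a calendar, then associate an existing anomaly detection job with it
+await client.ml.putCalendar({ calendar_id: 'company-holidays', description: 'Days to ignore for anomaly detection' })
+await client.ml.putCalendarJob({ calendar_id: 'company-holidays', job_id: 'my-job' })
+----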
+ +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-put-calendar-job[Endpoint documentation] +[source,ts] +---- +client.ml.putCalendarJob({ calendar_id, job_id }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`calendar_id` (string)*: A string that uniquely identifies a calendar. +** *`job_id` (string | string[])*: An identifier for the anomaly detection jobs. It can be a job identifier, a group name, or a list of jobs or groups. + +[discrete] +==== put_data_frame_analytics +Create a data frame analytics job. +This API creates a data frame analytics job that performs an analysis on the +source indices and stores the outcome in a destination index. +By default, the query used in the source configuration is `{"match_all": {}}`. + +If the destination index does not exist, it is created automatically when you start the job. + +If you supply only a subset of the regression or classification parameters, hyperparameter optimization occurs. It determines a value for each of the undefined parameters. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-put-data-frame-analytics[Endpoint documentation] +[source,ts] +---- +client.ml.putDataFrameAnalytics({ id, analysis, dest, source }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`id` (string)*: Identifier for the data frame analytics job. This identifier can contain +lowercase alphanumeric characters (a-z and 0-9), hyphens, and +underscores. It must start and end with alphanumeric characters. +** *`analysis` ({ classification, outlier_detection, regression })*: The analysis configuration, which contains the information necessary to +perform one of the following types of analysis: classification, outlier +detection, or regression. +** *`dest` ({ index, results_field })*: The destination configuration. +** *`source` ({ index, query, runtime_mappings, _source })*: The configuration of how to source the analysis data. +** *`allow_lazy_start` (Optional, boolean)*: Specifies whether this job can start when there is insufficient machine +learning node capacity for it to be immediately assigned to a node. If +set to `false` and a machine learning node with capacity to run the job +cannot be immediately found, the API returns an error. If set to `true`, +the API does not return an error; the job waits in the `starting` state +until sufficient machine learning node capacity is available. This +behavior is also affected by the cluster-wide +`xpack.ml.max_lazy_ml_nodes` setting. +** *`analyzed_fields` (Optional, { includes, excludes })*: Specifies `includes` and/or `excludes` patterns to select which fields +will be included in the analysis. The patterns specified in `excludes` +are applied last, therefore `excludes` takes precedence. In other words, +if the same field is specified in both `includes` and `excludes`, then +the field will not be included in the analysis. If `analyzed_fields` is +not set, only the relevant fields will be included. For example, all the +numeric fields for outlier detection. +The supported fields vary for each type of analysis. Outlier detection +requires numeric or `boolean` data to analyze. The algorithms don’t +support missing values therefore fields that have data types other than +numeric or boolean are ignored. Documents where included fields contain +missing values, null values, or an array are also ignored. Therefore the +`dest` index may contain documents that don’t have an outlier score. 
+Regression supports fields that are numeric, `boolean`, `text`, +`keyword`, and `ip` data types. It is also tolerant of missing values. +Fields that are supported are included in the analysis, other fields are +ignored. Documents where included fields contain an array with two or +more values are also ignored. Documents in the `dest` index that don’t +contain a results field are not included in the regression analysis. +Classification supports fields that are numeric, `boolean`, `text`, +`keyword`, and `ip` data types. It is also tolerant of missing values. +Fields that are supported are included in the analysis, other fields are +ignored. Documents where included fields contain an array with two or +more values are also ignored. Documents in the `dest` index that don’t +contain a results field are not included in the classification analysis. +Classification analysis can be improved by mapping ordinal variable +values to a single number. For example, in case of age ranges, you can +model the values as `0-14 = 0`, `15-24 = 1`, `25-34 = 2`, and so on. +** *`description` (Optional, string)*: A description of the job. +** *`max_num_threads` (Optional, number)*: The maximum number of threads to be used by the analysis. Using more +threads may decrease the time necessary to complete the analysis at the +cost of using more CPU. Note that the process may use additional threads +for operational functionality other than the analysis itself. +** *`_meta` (Optional, Record)* +** *`model_memory_limit` (Optional, string)*: The approximate maximum amount of memory resources that are permitted for +analytical processing. If your `elasticsearch.yml` file contains an +`xpack.ml.max_model_memory_limit` setting, an error occurs when you try +to create data frame analytics jobs that have `model_memory_limit` values +greater than that setting. +** *`headers` (Optional, Record)* +** *`version` (Optional, string)* + +[discrete] +==== put_datafeed +Create a datafeed. +Datafeeds retrieve data from Elasticsearch for analysis by an anomaly detection job. +You can associate only one datafeed with each anomaly detection job. +The datafeed contains a query that runs at a defined interval (`frequency`). +If you are concerned about delayed data, you can add a delay (`query_delay`) at each interval. +By default, the datafeed uses the following query: `{"match_all": {"boost": 1}}`. + +When Elasticsearch security features are enabled, your datafeed remembers which roles the user who created it had +at the time of creation and runs the query using those same roles. If you provide secondary authorization headers, +those credentials are used instead. +You must use Kibana, this API, or the create anomaly detection jobs API to create a datafeed. Do not add a datafeed +directly to the `.ml-config` index. Do not give users `write` privileges on the `.ml-config` index. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-put-datafeed[Endpoint documentation] +[source,ts] +---- +client.ml.putDatafeed({ datafeed_id }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`datafeed_id` (string)*: A numerical character string that uniquely identifies the datafeed. +This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. +It must start and end with alphanumeric characters. +** *`aggregations` (Optional, Record)*: If set, the datafeed performs aggregation searches. +Support for aggregations is limited and should be used only with low cardinality data.
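+
+For example, a simple query-based datafeed (no aggregations) might be created like this; the datafeed, job, and index names are hypothetical:
+
+[source,ts]
+----
+// a minimal datafeed definition; the query here is illustrative
+await client.ml.putDatafeed({
+  datafeed_id: 'datafeed-my-job',
+  job_id: 'my-job',
+  indices: ['my-index'],
+  query: { match_all: {} }
+})
+----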
+** *`chunking_config` (Optional, { mode, time_span })*: Datafeeds might be required to search over long time periods, for several months or years. +This search is split into time chunks in order to ensure the load on Elasticsearch is managed. +Chunking configuration controls how the size of these time chunks are calculated; +it is an advanced configuration option. +** *`delayed_data_check_config` (Optional, { check_window, enabled })*: Specifies whether the datafeed checks for missing data and the size of the window. +The datafeed can optionally search over indices that have already been read in an effort to determine whether +any data has subsequently been added to the index. If missing data is found, it is a good indication that the +`query_delay` is set too low and the data is being indexed after the datafeed has passed that moment in time. +This check runs only on real-time datafeeds. +** *`frequency` (Optional, string | -1 | 0)*: The interval at which scheduled queries are made while the datafeed runs in real time. +The default value is either the bucket span for short bucket spans, or, for longer bucket spans, a sensible +fraction of the bucket span. When `frequency` is shorter than the bucket span, interim results for the last +(partial) bucket are written then eventually overwritten by the full bucket results. If the datafeed uses +aggregations, this value must be divisible by the interval of the date histogram aggregation. +** *`indices` (Optional, string | string[])*: An array of index names. Wildcards are supported. If any of the indices are in remote clusters, the master +nodes and the machine learning nodes must have the `remote_cluster_client` role. +** *`indices_options` (Optional, { allow_no_indices, expand_wildcards, ignore_unavailable, ignore_throttled })*: Specifies index expansion options that are used during search +** *`job_id` (Optional, string)*: Identifier for the anomaly detection job. +** *`max_empty_searches` (Optional, number)*: If a real-time datafeed has never seen any data (including during any initial training period), it automatically +stops and closes the associated job after this many real-time searches return no documents. In other words, +it stops after `frequency` times `max_empty_searches` of real-time operation. If not set, a datafeed with no +end time that sees no data remains started until it is explicitly stopped. By default, it is not set. +** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: The Elasticsearch query domain-specific language (DSL). This value corresponds to the query object in an +Elasticsearch search POST body. All the options that are supported by Elasticsearch can be used, as this +object is passed verbatim to Elasticsearch. +** *`query_delay` (Optional, string | -1 | 0)*: The number of seconds behind real time that data is queried. 
For example, if data from 10:04 a.m. might +not be searchable in Elasticsearch until 10:06 a.m., set this property to 120 seconds. The default +value is randomly selected between `60s` and `120s`. This randomness improves the query performance +when there are multiple jobs running on the same node. +** *`runtime_mappings` (Optional, Record)*: Specifies runtime fields for the datafeed search. +** *`script_fields` (Optional, Record)*: Specifies scripts that evaluate custom expressions and returns script fields to the datafeed. +The detector configuration objects in a job can contain functions that use these script fields. +** *`scroll_size` (Optional, number)*: The size parameter that is used in Elasticsearch searches when the datafeed does not use aggregations. +The maximum value is the value of `index.max_result_window`, which is 10,000 by default. +** *`headers` (Optional, Record)* +** *`allow_no_indices` (Optional, boolean)*: If true, wildcard indices expressions that resolve into no concrete indices are ignored. This includes the `_all` +string or when no indices are specified. +** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines +whether wildcard expressions match hidden data streams. Supports a list of values. +** *`ignore_throttled` (Optional, boolean)*: If true, concrete, expanded, or aliased indices are ignored when frozen. +** *`ignore_unavailable` (Optional, boolean)*: If true, unavailable indices (missing or closed) are ignored. + +[discrete] +==== put_filter +Create a filter. +A filter contains a list of strings. It can be used by one or more anomaly detection jobs. +Specifically, filters are referenced in the `custom_rules` property of detector configuration objects. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-put-filter[Endpoint documentation] +[source,ts] +---- +client.ml.putFilter({ filter_id }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`filter_id` (string)*: A string that uniquely identifies a filter. +** *`description` (Optional, string)*: A description of the filter. +** *`items` (Optional, string[])*: The items of the filter. A wildcard `*` can be used at the beginning or the end of an item. +Up to 10000 items are allowed in each filter. + +[discrete] +==== put_job +Create an anomaly detection job. + +If you include a `datafeed_config`, you must have read index privileges on the source index. +If you include a `datafeed_config` but do not provide a query, the datafeed uses `{"match_all": {"boost": 1}}`. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-put-job[Endpoint documentation] +[source,ts] +---- +client.ml.putJob({ job_id, analysis_config, data_description }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`job_id` (string)*: The identifier for the anomaly detection job. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. It must start and end with alphanumeric characters. +** *`analysis_config` ({ bucket_span, categorization_analyzer, categorization_field_name, categorization_filters, detectors, influencers, latency, model_prune_window, multivariate_by_fields, per_partition_categorization, summary_count_field_name })*: Specifies how to analyze the data. 
After you create a job, you cannot change the analysis configuration; all the properties are informational. +** *`data_description` ({ format, time_field, time_format, field_delimiter })*: Defines the format of the input data when you send data to the job by using the post data API. Note that when you configure a datafeed, these properties are automatically set. When data is received via the post data API, it is not stored in Elasticsearch. Only the results for anomaly detection are retained. +** *`allow_lazy_open` (Optional, boolean)*: Advanced configuration option. Specifies whether this job can open when there is insufficient machine learning node capacity for it to be immediately assigned to a node. By default, if a machine learning node with capacity to run the job cannot immediately be found, the open anomaly detection jobs API returns an error. However, this is also subject to the cluster-wide `xpack.ml.max_lazy_ml_nodes` setting. If this option is set to true, the open anomaly detection jobs API does not return an error and the job waits in the opening state until sufficient machine learning node capacity is available. +** *`analysis_limits` (Optional, { categorization_examples_limit, model_memory_limit })*: Limits can be applied for the resources required to hold the mathematical models in memory. These limits are approximate and can be set per job. They do not control the memory used by other processes, for example the Elasticsearch Java processes. +** *`background_persist_interval` (Optional, string | -1 | 0)*: Advanced configuration option. The time between each periodic persistence of the model. The default value is a randomized value between 3 to 4 hours, which avoids all jobs persisting at exactly the same time. The smallest allowed value is 1 hour. For very large models (several GB), persistence could take 10-20 minutes, so do not set the `background_persist_interval` value too low. +** *`custom_settings` (Optional, User-defined value)*: Advanced configuration option. Contains custom meta data about the job. +** *`daily_model_snapshot_retention_after_days` (Optional, number)*: Advanced configuration option, which affects the automatic removal of old model snapshots for this job. It specifies a period of time (in days) after which only the first snapshot per day is retained. This period is relative to the timestamp of the most recent snapshot for this job. Valid values range from 0 to `model_snapshot_retention_days`. +** *`datafeed_config` (Optional, { aggregations, chunking_config, datafeed_id, delayed_data_check_config, frequency, indices, indices_options, job_id, max_empty_searches, query, query_delay, runtime_mappings, script_fields, scroll_size })*: Defines a datafeed for the anomaly detection job. If Elasticsearch security features are enabled, your datafeed remembers which roles the user who created it had at the time of creation and runs the query using those same roles. If you provide secondary authorization headers, those credentials are used instead. +** *`description` (Optional, string)*: A description of the job. +** *`groups` (Optional, string[])*: A list of job groups. A job can belong to no groups or many. +** *`model_plot_config` (Optional, { annotations_enabled, enabled, terms })*: This advanced configuration option stores model information along with the results. It provides a more detailed view into anomaly detection. If you enable model plot it can add considerable overhead to the performance of the system; it is not feasible for jobs with many entities.
Model plot provides a simplified and indicative view of the model and its bounds. It does not display complex features such as multivariate correlations or multimodal data. As such, anomalies may occasionally be reported which cannot be seen in the model plot. Model plot config can be configured when the job is created or updated later. It must be disabled if performance issues are experienced. +** *`model_snapshot_retention_days` (Optional, number)*: Advanced configuration option, which affects the automatic removal of old model snapshots for this job. It specifies the maximum period of time (in days) that snapshots are retained. This period is relative to the timestamp of the most recent snapshot for this job. By default, snapshots ten days older than the newest snapshot are deleted. +** *`renormalization_window_days` (Optional, number)*: Advanced configuration option. The period over which adjustments to the score are applied, as new data is seen. The default value is the longer of 30 days or 100 bucket spans. +** *`results_index_name` (Optional, string)*: A text string that affects the name of the machine learning results index. By default, the job generates an index named `.ml-anomalies-shared`. +** *`results_retention_days` (Optional, number)*: Advanced configuration option. The period of time (in days) that results are retained. Age is calculated relative to the timestamp of the latest bucket result. If this property has a non-null value, once per day at 00:30 (server time), results that are the specified number of days older than the latest bucket result are deleted from Elasticsearch. The default value is null, which means all results are retained. Annotations generated by the system also count as results for retention purposes; they are deleted after the same number of days as results. Annotations added by users are retained forever. +** *`allow_no_indices` (Optional, boolean)*: If `true`, wildcard indices expressions that resolve into no concrete indices are ignored. This includes the +`_all` string or when no indices are specified. +** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines +whether wildcard expressions match hidden data streams. Supports a list of values. Valid values are: + +* `all`: Match any data stream or index, including hidden ones. +* `closed`: Match closed, non-hidden indices. Also matches any non-hidden data stream. Data streams cannot be closed. +* `hidden`: Match hidden data streams and hidden indices. Must be combined with `open`, `closed`, or both. +* `none`: Wildcard patterns are not accepted. +* `open`: Match open, non-hidden indices. Also matches any non-hidden data stream. +** *`ignore_throttled` (Optional, boolean)*: If `true`, concrete, expanded or aliased indices are ignored when frozen. +** *`ignore_unavailable` (Optional, boolean)*: If `true`, unavailable indices (missing or closed) are ignored. + +[discrete] +==== put_trained_model +Create a trained model. +Enables you to supply a trained model that is not created by data frame analytics. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-put-trained-model[Endpoint documentation] +[source,ts] +---- +client.ml.putTrainedModel({ model_id }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`model_id` (string)*: The unique identifier of the trained model.
+** *`compressed_definition` (Optional, string)*: The compressed (GZipped and Base64 encoded) inference definition of the +model. If compressed_definition is specified, then definition cannot be +specified. +** *`definition` (Optional, { preprocessors, trained_model })*: The inference definition for the model. If definition is specified, then +compressed_definition cannot be specified. +** *`description` (Optional, string)*: A human-readable description of the inference trained model. +** *`inference_config` (Optional, { regression, classification, text_classification, zero_shot_classification, fill_mask, ner, pass_through, text_embedding, text_expansion, question_answering })*: The default configuration for inference. This can be either a regression +or classification configuration. It must match the underlying +definition.trained_model's target_type. For pre-packaged models such as +ELSER the config is not required. +** *`input` (Optional, { field_names })*: The input field names for the model definition. +** *`metadata` (Optional, User-defined value)*: An object map that contains metadata about the model. +** *`model_type` (Optional, Enum("tree_ensemble" | "lang_ident" | "pytorch"))*: The model type. +** *`model_size_bytes` (Optional, number)*: The estimated memory usage in bytes to keep the trained model in memory. +This property is supported only if defer_definition_decompression is true +or the model definition is not supplied. +** *`platform_architecture` (Optional, string)*: The platform architecture (if applicable) of the trained model. If the model +only works on one platform, because it is heavily optimized for a particular +processor architecture and OS combination, then this field specifies which. +The format of the string must match the platform identifiers used by Elasticsearch, +so one of `linux-x86_64`, `linux-aarch64`, `darwin-x86_64`, `darwin-aarch64`, +or `windows-x86_64`. For portable models (those that work independent of processor +architecture or OS features), leave this field unset. +** *`tags` (Optional, string[])*: An array of tags to organize the model. +** *`prefix_strings` (Optional, { ingest, search })*: Optional prefix strings applied at inference +** *`defer_definition_decompression` (Optional, boolean)*: If set to `true` and a `compressed_definition` is provided, +the request defers definition decompression and skips relevant +validations. +** *`wait_for_completion` (Optional, boolean)*: Whether to wait for all child operations (e.g. model download) +to complete. + +[discrete] +==== put_trained_model_alias +Create or update a trained model alias. +A trained model alias is a logical name used to reference a single trained +model. +You can use aliases instead of trained model identifiers to make it easier to +reference your models. For example, you can use aliases in inference +aggregations and processors. +An alias must be unique and refer to only a single trained model. However, +you can have multiple aliases for each trained model. +If you use this API to update an alias such that it references a different +trained model ID and the model uses a different type of data frame analytics, +an error occurs. For example, this situation occurs if you have a trained +model for regression analysis and a trained model for classification +analysis; you cannot reassign an alias from one type of trained model to +another.
+If you use this API to update an alias and there are very few input fields in +common between the old and new trained models for the model alias, the API +returns a warning. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-put-trained-model-alias[Endpoint documentation] +[source,ts] +---- +client.ml.putTrainedModelAlias({ model_alias, model_id }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`model_alias` (string)*: The alias to create or update. This value cannot end in numbers. +** *`model_id` (string)*: The identifier for the trained model that the alias refers to. +** *`reassign` (Optional, boolean)*: Specifies whether the alias gets reassigned to the specified trained +model if it is already assigned to a different model. If the alias is +already assigned and this parameter is false, the API returns an error. + +[discrete] +==== put_trained_model_definition_part +Create part of a trained model definition. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-put-trained-model-definition-part[Endpoint documentation] +[source,ts] +---- +client.ml.putTrainedModelDefinitionPart({ model_id, part, definition, total_definition_length, total_parts }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`model_id` (string)*: The unique identifier of the trained model. +** *`part` (number)*: The definition part number. When the definition is loaded for inference the definition parts are streamed in the +order of their part number. The first part must be `0` and the final part must be `total_parts - 1`. +** *`definition` (string)*: The definition part for the model. Must be a base64 encoded string. +** *`total_definition_length` (number)*: The total uncompressed definition length in bytes. Not base64 encoded. +** *`total_parts` (number)*: The total number of parts that will be uploaded. Must be greater than 0. + +[discrete] +==== put_trained_model_vocabulary +Create a trained model vocabulary. +This API is supported only for natural language processing (NLP) models. +The vocabulary is stored in the index as described in `inference_config.*.vocabulary` of the trained model definition. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-put-trained-model-vocabulary[Endpoint documentation] +[source,ts] +---- +client.ml.putTrainedModelVocabulary({ model_id, vocabulary }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`model_id` (string)*: The unique identifier of the trained model. +** *`vocabulary` (string[])*: The model vocabulary, which must not be empty. +** *`merges` (Optional, string[])*: The optional model merges if required by the tokenizer. +** *`scores` (Optional, number[])*: The optional vocabulary value scores if required by the tokenizer. + +[discrete] +==== reset_job +Reset an anomaly detection job. +All model state and results are deleted. The job is ready to start over as if +it had just been created. +It is not currently possible to reset multiple jobs using wildcards or a +comma separated list. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-reset-job[Endpoint documentation] +[source,ts] +---- +client.ml.resetJob({ job_id }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`job_id` (string)*: The ID of the job to reset. +** *`wait_for_completion` (Optional, boolean)*: Should this request wait until the operation has completed before +returning. 
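+
+For instance, a reset can be kicked off without waiting for it to finish; the job identifier below is hypothetical:
+
+[source,ts]
+----
+// with wait_for_completion: false the call returns promptly rather than blocking until the reset completes
+await client.ml.resetJob({ job_id: 'my-job', wait_for_completion: false })
+----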
+** *`delete_user_annotations` (Optional, boolean)*: Specifies whether annotations that have been added by the +user should be deleted along with any auto-generated annotations when the job is +reset. + +[discrete] +==== revert_model_snapshot +Revert to a snapshot. +The machine learning features react quickly to anomalous input, learning new +behaviors in data. Highly anomalous input increases the variance in the +models whilst the system learns whether this is a new step-change in behavior +or a one-off event. In the case where this anomalous input is known to be a +one-off, then it might be appropriate to reset the model state to a time +before this event. For example, you might consider reverting to a saved +snapshot after Black Friday or a critical system failure. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-revert-model-snapshot[Endpoint documentation] +[source,ts] +---- +client.ml.revertModelSnapshot({ job_id, snapshot_id }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`job_id` (string)*: Identifier for the anomaly detection job. +** *`snapshot_id` (string)*: You can specify `empty` as the snapshot ID. Reverting to the empty +snapshot means the anomaly detection job starts learning a new model from +scratch when it is started. +** *`delete_intervening_results` (Optional, boolean)*: Refer to the description for the `delete_intervening_results` query parameter. + +[discrete] +==== set_upgrade_mode +Set upgrade_mode for ML indices. +Sets a cluster wide upgrade_mode setting that prepares machine learning +indices for an upgrade. +When upgrading your cluster, in some circumstances you must restart your +nodes and reindex your machine learning indices. In those circumstances, +there must be no machine learning jobs running. You can close the machine +learning jobs, do the upgrade, then open all the jobs again. Alternatively, +you can use this API to temporarily halt tasks associated with the jobs and +datafeeds and prevent new jobs from opening. You can also use this API +during upgrades that do not require you to reindex your machine learning +indices, though stopping jobs is not a requirement in that case. +You can see the current value for the upgrade_mode setting by using the get +machine learning info API. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-set-upgrade-mode[Endpoint documentation] +[source,ts] +---- +client.ml.setUpgradeMode({ ... }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`enabled` (Optional, boolean)*: When `true`, it enables `upgrade_mode` which temporarily halts all job +and datafeed tasks and prohibits new job and datafeed tasks from +starting. +** *`timeout` (Optional, string | -1 | 0)*: The time to wait for the request to be completed. + +[discrete] +==== start_data_frame_analytics +Start a data frame analytics job. +A data frame analytics job can be started and stopped multiple times +throughout its lifecycle. +If the destination index does not exist, it is created automatically the +first time you start the data frame analytics job. The +`index.number_of_shards` and `index.number_of_replicas` settings for the +destination index are copied from the source index. If there are multiple +source indices, the destination index copies the highest setting values. The +mappings for the destination index are also copied from the source indices. +If there are any mapping conflicts, the job fails to start. +If the destination index exists, it is used as is.
You can therefore set up +the destination index in advance with custom settings and mappings. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-start-data-frame-analytics[Endpoint documentation] +[source,ts] +---- +client.ml.startDataFrameAnalytics({ id }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`id` (string)*: Identifier for the data frame analytics job. This identifier can contain +lowercase alphanumeric characters (a-z and 0-9), hyphens, and +underscores. It must start and end with alphanumeric characters. +** *`timeout` (Optional, string | -1 | 0)*: Controls the amount of time to wait until the data frame analytics job +starts. + +[discrete] +==== start_datafeed +Start datafeeds. + +A datafeed must be started in order to retrieve data from Elasticsearch. A datafeed can be started and stopped +multiple times throughout its lifecycle. + +Before you can start a datafeed, the anomaly detection job must be open. Otherwise, an error occurs. + +If you restart a stopped datafeed, it continues processing input data from the next millisecond after it was stopped. +If new data was indexed for that exact millisecond between stopping and starting, it will be ignored. + +When Elasticsearch security features are enabled, your datafeed remembers which roles the last user to create or +update it had at the time of creation or update and runs the query using those same roles. If you provided secondary +authorization headers when you created or updated the datafeed, those credentials are used instead. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-start-datafeed[Endpoint documentation] +[source,ts] +---- +client.ml.startDatafeed({ datafeed_id }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`datafeed_id` (string)*: A numerical character string that uniquely identifies the datafeed. This identifier can contain lowercase +alphanumeric characters (a-z and 0-9), hyphens, and underscores. It must start and end with alphanumeric +characters. +** *`end` (Optional, string | Unit)*: Refer to the description for the `end` query parameter. +** *`start` (Optional, string | Unit)*: Refer to the description for the `start` query parameter. +** *`timeout` (Optional, string | -1 | 0)*: Refer to the description for the `timeout` query parameter. + +[discrete] +==== start_trained_model_deployment +Start a trained model deployment. +It allocates the model to every machine learning node. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-start-trained-model-deployment[Endpoint documentation] +[source,ts] +---- +client.ml.startTrainedModelDeployment({ model_id }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`model_id` (string)*: The unique identifier of the trained model. Currently, only PyTorch models are supported. +** *`adaptive_allocations` (Optional, { enabled, min_number_of_allocations, max_number_of_allocations })*: Adaptive allocations configuration. When enabled, the number of allocations +is set based on the current load. +If adaptive_allocations is enabled, do not set the number of allocations manually. +** *`cache_size` (Optional, number | string)*: The inference cache size (in memory outside the JVM heap) per node for the model. +The default value is the same size as the `model_size_bytes`. To disable the cache, +`0b` can be provided. +** *`deployment_id` (Optional, string)*: A unique identifier for the deployment of the model. 
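+
+A deployment might be started with an explicit deployment ID and fixed sizing, for example (the model ID and deployment ID below are hypothetical):
+
+[source,ts]
+----
+// wait until the deployment reaches the "started" state before resolving
+await client.ml.startTrainedModelDeployment({
+  model_id: 'my-model',
+  deployment_id: 'my-model-for-search',
+  number_of_allocations: 1,
+  threads_per_allocation: 1,
+  wait_for: 'started'
+})
+----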
+** *`number_of_allocations` (Optional, number)*: The number of model allocations on each node where the model is deployed. +All allocations on a node share the same copy of the model in memory but use +a separate set of threads to evaluate the model. +Increasing this value generally increases the throughput. +If this setting is greater than the number of hardware threads +it will automatically be changed to a value less than the number of hardware threads. +If adaptive_allocations is enabled, do not set this value, because it’s automatically set. +** *`priority` (Optional, Enum("normal" | "low"))*: The deployment priority. +** *`queue_capacity` (Optional, number)*: Specifies the number of inference requests that are allowed in the queue. After the number of requests exceeds +this value, new requests are rejected with a 429 error. +** *`threads_per_allocation` (Optional, number)*: Sets the number of threads used by each model allocation during inference. This generally increases +the inference speed. The inference process is a compute-bound process; any number +greater than the number of available hardware threads on the machine does not increase the +inference speed. If this setting is greater than the number of hardware threads +it will automatically be changed to a value less than the number of hardware threads. +** *`timeout` (Optional, string | -1 | 0)*: Specifies the amount of time to wait for the model to deploy. +** *`wait_for` (Optional, Enum("started" | "starting" | "fully_allocated"))*: Specifies the allocation status to wait for before returning. + +[discrete] +==== stop_data_frame_analytics +Stop data frame analytics jobs. +A data frame analytics job can be started and stopped multiple times +throughout its lifecycle. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-stop-data-frame-analytics[Endpoint documentation] +[source,ts] +---- +client.ml.stopDataFrameAnalytics({ id }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`id` (string)*: Identifier for the data frame analytics job. This identifier can contain +lowercase alphanumeric characters (a-z and 0-9), hyphens, and +underscores. It must start and end with alphanumeric characters. +** *`allow_no_match` (Optional, boolean)*: Specifies what to do when the request: + +. Contains wildcard expressions and there are no data frame analytics +jobs that match. +. Contains the _all string or no identifiers and there are no matches. +. Contains wildcard expressions and there are only partial matches. + +The default value is true, which returns an empty data_frame_analytics +array when there are no matches and the subset of results when there are +partial matches. If this parameter is false, the request returns a 404 +status code when there are no matches or only partial matches. +** *`force` (Optional, boolean)*: If true, the data frame analytics job is stopped forcefully. +** *`timeout` (Optional, string | -1 | 0)*: Controls the amount of time to wait until the data frame analytics job +stops. Defaults to 20 seconds. + +[discrete] +==== stop_datafeed +Stop datafeeds. +A datafeed that is stopped ceases to retrieve data from Elasticsearch. A datafeed can be started and stopped +multiple times throughout its lifecycle. 
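+
+For example, a single datafeed can be stopped gracefully with an explicit timeout; the datafeed ID below is hypothetical:
+
+[source,ts]
+----
+// force: true would stop the datafeed even if a graceful stop does not complete in time
+await client.ml.stopDatafeed({ datafeed_id: 'datafeed-my-job', timeout: '30s' })
+----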
+ +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-stop-datafeed[Endpoint documentation] +[source,ts] +---- +client.ml.stopDatafeed({ datafeed_id }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`datafeed_id` (string)*: Identifier for the datafeed. You can stop multiple datafeeds in a single API request by using a comma-separated +list of datafeeds or a wildcard expression. You can close all datafeeds by using `_all` or by specifying `*` as +the identifier. +** *`allow_no_match` (Optional, boolean)*: Refer to the description for the `allow_no_match` query parameter. +** *`force` (Optional, boolean)*: Refer to the description for the `force` query parameter. +** *`timeout` (Optional, string | -1 | 0)*: Refer to the description for the `timeout` query parameter. + +[discrete] +==== stop_trained_model_deployment +Stop a trained model deployment. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-stop-trained-model-deployment[Endpoint documentation] +[source,ts] +---- +client.ml.stopTrainedModelDeployment({ model_id }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`model_id` (string)*: The unique identifier of the trained model. +** *`allow_no_match` (Optional, boolean)*: Specifies what to do when the request: contains wildcard expressions and there are no deployments that match; +contains the `_all` string or no identifiers and there are no matches; or contains wildcard expressions and +there are only partial matches. By default, it returns an empty array when there are no matches and the subset of results when there are partial matches. +If `false`, the request returns a 404 status code when there are no matches or only partial matches. +** *`force` (Optional, boolean)*: Forcefully stops the deployment, even if it is used by ingest pipelines. You can't use these pipelines until you +restart the model deployment. + +[discrete] +==== update_data_frame_analytics +Update a data frame analytics job. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-update-data-frame-analytics[Endpoint documentation] +[source,ts] +---- +client.ml.updateDataFrameAnalytics({ id }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`id` (string)*: Identifier for the data frame analytics job. This identifier can contain +lowercase alphanumeric characters (a-z and 0-9), hyphens, and +underscores. It must start and end with alphanumeric characters. +** *`description` (Optional, string)*: A description of the job. +** *`model_memory_limit` (Optional, string)*: The approximate maximum amount of memory resources that are permitted for +analytical processing. If your `elasticsearch.yml` file contains an +`xpack.ml.max_model_memory_limit` setting, an error occurs when you try +to create data frame analytics jobs that have `model_memory_limit` values +greater than that setting. +** *`max_num_threads` (Optional, number)*: The maximum number of threads to be used by the analysis. Using more +threads may decrease the time necessary to complete the analysis at the +cost of using more CPU. Note that the process may use additional threads +for operational functionality other than the analysis itself. +** *`allow_lazy_start` (Optional, boolean)*: Specifies whether this job can start when there is insufficient machine +learning node capacity for it to be immediately assigned to a node. + +[discrete] +==== update_datafeed +Update a datafeed. 
+You must stop and start the datafeed for the changes to be applied. +When Elasticsearch security features are enabled, your datafeed remembers which roles the user who updated it had at +the time of the update and runs the query using those same roles. If you provide secondary authorization headers, +those credentials are used instead. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-update-datafeed[Endpoint documentation] +[source,ts] +---- +client.ml.updateDatafeed({ datafeed_id }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`datafeed_id` (string)*: A numerical character string that uniquely identifies the datafeed. +This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. +It must start and end with alphanumeric characters. +** *`aggregations` (Optional, Record)*: If set, the datafeed performs aggregation searches. Support for aggregations is limited and should be used only +with low cardinality data. +** *`chunking_config` (Optional, { mode, time_span })*: Datafeeds might search over long time periods, for several months or years. This search is split into time +chunks in order to ensure the load on Elasticsearch is managed. Chunking configuration controls how the size of +these time chunks are calculated; it is an advanced configuration option. +** *`delayed_data_check_config` (Optional, { check_window, enabled })*: Specifies whether the datafeed checks for missing data and the size of the window. The datafeed can optionally +search over indices that have already been read in an effort to determine whether any data has subsequently been +added to the index. If missing data is found, it is a good indication that the `query_delay` is set too low and +the data is being indexed after the datafeed has passed that moment in time. This check runs only on real-time +datafeeds. +** *`frequency` (Optional, string | -1 | 0)*: The interval at which scheduled queries are made while the datafeed runs in real time. The default value is +either the bucket span for short bucket spans, or, for longer bucket spans, a sensible fraction of the bucket +span. When `frequency` is shorter than the bucket span, interim results for the last (partial) bucket are +written then eventually overwritten by the full bucket results. If the datafeed uses aggregations, this value +must be divisible by the interval of the date histogram aggregation. +** *`indices` (Optional, string[])*: An array of index names. Wildcards are supported. If any of the indices are in remote clusters, the machine +learning nodes must have the `remote_cluster_client` role. +** *`indices_options` (Optional, { allow_no_indices, expand_wildcards, ignore_unavailable, ignore_throttled })*: Specifies index expansion options that are used during search. +** *`job_id` (Optional, string)* +** *`max_empty_searches` (Optional, number)*: If a real-time datafeed has never seen any data (including during any initial training period), it automatically +stops and closes the associated job after this many real-time searches return no documents. In other words, +it stops after `frequency` times `max_empty_searches` of real-time operation. If not set, a datafeed with no +end time that sees no data remains started until it is explicitly stopped. By default, it is not set. 
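+
+As a small illustration, a stopped datafeed's query delay and scroll size could be adjusted like this (the datafeed ID is hypothetical):
+
+[source,ts]
+----
+// the datafeed must be stopped, updated, then started again for the change to apply
+await client.ml.updateDatafeed({ datafeed_id: 'datafeed-my-job', query_delay: '90s', scroll_size: 500 })
+----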
+** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: The Elasticsearch query domain-specific language (DSL). This value corresponds to the query object in an +Elasticsearch search POST body. All the options that are supported by Elasticsearch can be used, as this +object is passed verbatim to Elasticsearch. Note that if you change the query, the analyzed data is also +changed. Therefore, the time required to learn might be long and the understandability of the results is +unpredictable. If you want to make significant changes to the source data, it is recommended that you +clone the job and datafeed and make the amendments in the clone. Let both run in parallel and close one +when you are satisfied with the results of the job. +** *`query_delay` (Optional, string | -1 | 0)*: The number of seconds behind real time that data is queried. For example, if data from 10:04 a.m. might +not be searchable in Elasticsearch until 10:06 a.m., set this property to 120 seconds. The default +value is randomly selected between `60s` and `120s`. This randomness improves the query performance +when there are multiple jobs running on the same node. +** *`runtime_mappings` (Optional, Record)*: Specifies runtime fields for the datafeed search. +** *`script_fields` (Optional, Record)*: Specifies scripts that evaluate custom expressions and returns script fields to the datafeed. +The detector configuration objects in a job can contain functions that use these script fields. +** *`scroll_size` (Optional, number)*: The size parameter that is used in Elasticsearch searches when the datafeed does not use aggregations. +The maximum value is the value of `index.max_result_window`. +** *`allow_no_indices` (Optional, boolean)*: If `true`, wildcard indices expressions that resolve into no concrete indices are ignored. This includes the +`_all` string or when no indices are specified. +** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines +whether wildcard expressions match hidden data streams. Supports a list of values. Valid values are: + +* `all`: Match any data stream or index, including hidden ones. +* `closed`: Match closed, non-hidden indices. Also matches any non-hidden data stream. Data streams cannot be closed. +* `hidden`: Match hidden data streams and hidden indices. Must be combined with `open`, `closed`, or both. +* `none`: Wildcard patterns are not accepted. +* `open`: Match open, non-hidden indices. Also matches any non-hidden data stream. +** *`ignore_throttled` (Optional, boolean)*: If `true`, concrete, expanded or aliased indices are ignored when frozen. 
+** *`ignore_unavailable` (Optional, boolean)*: If `true`, unavailable indices (missing or closed) are ignored. + +[discrete] +==== update_filter +Update a filter. +Updates the description of a filter, adds items, or removes items from the list. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-update-filter[Endpoint documentation] +[source,ts] +---- +client.ml.updateFilter({ filter_id }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`filter_id` (string)*: A string that uniquely identifies a filter. +** *`add_items` (Optional, string[])*: The items to add to the filter. +** *`description` (Optional, string)*: A description for the filter. +** *`remove_items` (Optional, string[])*: The items to remove from the filter. + +[discrete] +==== update_job +Update an anomaly detection job. +Updates certain properties of an anomaly detection job. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-update-job[Endpoint documentation] +[source,ts] +---- +client.ml.updateJob({ job_id }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`job_id` (string)*: Identifier for the job. +** *`allow_lazy_open` (Optional, boolean)*: Advanced configuration option. Specifies whether this job can open when +there is insufficient machine learning node capacity for it to be +immediately assigned to a node. If `false` and a machine learning node +with capacity to run the job cannot immediately be found, the open +anomaly detection jobs API returns an error. However, this is also +subject to the cluster-wide `xpack.ml.max_lazy_ml_nodes` setting. If this +option is set to `true`, the open anomaly detection jobs API does not +return an error and the job waits in the opening state until sufficient +machine learning node capacity is available. +** *`analysis_limits` (Optional, { model_memory_limit })* +** *`background_persist_interval` (Optional, string | -1 | 0)*: Advanced configuration option. The time between each periodic persistence +of the model. +The default value is a randomized value between 3 to 4 hours, which +avoids all jobs persisting at exactly the same time. The smallest allowed +value is 1 hour. +For very large models (several GB), persistence could take 10-20 minutes, +so do not set the value too low. +If the job is open when you make the update, you must stop the datafeed, +close the job, then reopen the job and restart the datafeed for the +changes to take effect. +** *`custom_settings` (Optional, Record)*: Advanced configuration option. Contains custom meta data about the job. +For example, it can contain custom URL information as shown in Adding +custom URLs to machine learning results. +** *`categorization_filters` (Optional, string[])* +** *`description` (Optional, string)*: A description of the job. +** *`model_plot_config` (Optional, { annotations_enabled, enabled, terms })* +** *`model_prune_window` (Optional, string | -1 | 0)* +** *`daily_model_snapshot_retention_after_days` (Optional, number)*: Advanced configuration option, which affects the automatic removal of old +model snapshots for this job. It specifies a period of time (in days) +after which only the first snapshot per day is retained. This period is +relative to the timestamp of the most recent snapshot for this job. Valid +values range from 0 to `model_snapshot_retention_days`. For jobs created +before version 7.8.0, the default value matches +`model_snapshot_retention_days`. 
+** *`model_snapshot_retention_days` (Optional, number)*: Advanced configuration option, which affects the automatic removal of old +model snapshots for this job. It specifies the maximum period of time (in +days) that snapshots are retained. This period is relative to the +timestamp of the most recent snapshot for this job. +** *`renormalization_window_days` (Optional, number)*: Advanced configuration option. The period over which adjustments to the +score are applied, as new data is seen. +** *`results_retention_days` (Optional, number)*: Advanced configuration option. The period of time (in days) that results +are retained. Age is calculated relative to the timestamp of the latest +bucket result. If this property has a non-null value, once per day at +00:30 (server time), results that are the specified number of days older +than the latest bucket result are deleted from Elasticsearch. The default +value is null, which means all results are retained. +** *`groups` (Optional, string[])*: A list of job groups. A job can belong to no groups or many. +** *`detectors` (Optional, { detector_index, description, custom_rules }[])*: An array of detector update objects. +** *`per_partition_categorization` (Optional, { enabled, stop_on_warn })*: Settings related to how categorization interacts with partition fields. + +[discrete] +==== update_model_snapshot +Update a snapshot. +Updates certain properties of a snapshot. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-update-model-snapshot[Endpoint documentation] +[source,ts] +---- +client.ml.updateModelSnapshot({ job_id, snapshot_id }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`job_id` (string)*: Identifier for the anomaly detection job. +** *`snapshot_id` (string)*: Identifier for the model snapshot. +** *`description` (Optional, string)*: A description of the model snapshot. +** *`retain` (Optional, boolean)*: If `true`, this snapshot will not be deleted during automatic cleanup of +snapshots older than `model_snapshot_retention_days`. However, this +snapshot will be deleted when the job is deleted. + +[discrete] +==== update_trained_model_deployment +Update a trained model deployment. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-update-trained-model-deployment[Endpoint documentation] +[source,ts] +---- +client.ml.updateTrainedModelDeployment({ model_id }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`model_id` (string)*: The unique identifier of the trained model. Currently, only PyTorch models are supported. +** *`number_of_allocations` (Optional, number)*: The number of model allocations on each node where the model is deployed. +All allocations on a node share the same copy of the model in memory but use +a separate set of threads to evaluate the model. +Increasing this value generally increases the throughput. +If this setting is greater than the number of hardware threads +it will automatically be changed to a value less than the number of hardware threads. +If adaptive_allocations is enabled, do not set this value, because it’s automatically set. +** *`adaptive_allocations` (Optional, { enabled, min_number_of_allocations, max_number_of_allocations })*: Adaptive allocations configuration. When enabled, the number of allocations +is set based on the current load. +If adaptive_allocations is enabled, do not set the number of allocations manually. + +[discrete] +==== upgrade_job_snapshot +Upgrade a snapshot. 
+Upgrade an anomaly detection model snapshot to the latest major version. +Over time, older snapshot formats are deprecated and removed. Anomaly +detection jobs support only snapshots that are from the current or previous +major version. +This API provides a means to upgrade a snapshot to the current major version. +This aids in preparing the cluster for an upgrade to the next major version. +Only one snapshot per anomaly detection job can be upgraded at a time and the +upgraded snapshot cannot be the current snapshot of the anomaly detection +job. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-upgrade-job-snapshot[Endpoint documentation] +[source,ts] +---- +client.ml.upgradeJobSnapshot({ job_id, snapshot_id }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`job_id` (string)*: Identifier for the anomaly detection job. +** *`snapshot_id` (string)*: A numerical character string that uniquely identifies the model snapshot. +** *`wait_for_completion` (Optional, boolean)*: When true, the API won’t respond until the upgrade is complete. +Otherwise, it responds as soon as the upgrade task is assigned to a node. +** *`timeout` (Optional, string | -1 | 0)*: Controls the time to wait for the request to complete. + +[discrete] +=== nodes +[discrete] +==== clear_repositories_metering_archive +Clear the archived repositories metering. +Clear the archived repositories metering information in the cluster. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-nodes-clear-repositories-metering-archive[Endpoint documentation] +[source,ts] +---- +client.nodes.clearRepositoriesMeteringArchive({ node_id, max_archive_version }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`node_id` (string | string[])*: List of node IDs or names used to limit returned information. +** *`max_archive_version` (number)*: Specifies the maximum `archive_version` to be cleared from the archive. + +[discrete] +==== get_repositories_metering_info +Get cluster repositories metering. +Get repositories metering information for a cluster. +This API exposes monotonically non-decreasing counters and it is expected that clients would durably store the information needed to compute aggregations over a period of time. +Additionally, the information exposed by this API is volatile, meaning that it will not be present after node restarts. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-nodes-get-repositories-metering-info[Endpoint documentation] +[source,ts] +---- +client.nodes.getRepositoriesMeteringInfo({ node_id }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`node_id` (string | string[])*: List of node IDs or names used to limit returned information. +All the nodes selective options are explained [here](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster.html#cluster-nodes). + +[discrete] +==== hot_threads +Get the hot threads for nodes. +Get a breakdown of the hot threads on each selected node in the cluster. +The output is plain text with a breakdown of the top hot threads for each node. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-nodes-hot-threads[Endpoint documentation] +[source,ts] +---- +client.nodes.hotThreads({ ... }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`node_id` (Optional, string | string[])*: List of node IDs or names used to limit returned information. 
+** *`ignore_idle_threads` (Optional, boolean)*: If true, known idle threads (e.g. waiting in a socket select, or to get +a task from an empty queue) are filtered out. +** *`interval` (Optional, string | -1 | 0)*: The interval to do the second sampling of threads. +** *`snapshots` (Optional, number)*: Number of samples of thread stacktrace. +** *`threads` (Optional, number)*: Specifies the number of hot threads to provide information for. +** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. If no response is received +before the timeout expires, the request fails and returns an error. +** *`type` (Optional, Enum("cpu" | "wait" | "block" | "gpu" | "mem"))*: The type to sample. +** *`sort` (Optional, Enum("cpu" | "wait" | "block" | "gpu" | "mem"))*: The sort order for 'cpu' type (default: total) + +[discrete] +==== info +Get node information. + +By default, the API returns all attributes and core settings for cluster nodes. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-nodes-info[Endpoint documentation] +[source,ts] +---- +client.nodes.info({ ... }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`node_id` (Optional, string | string[])*: List of node IDs or names used to limit returned information. +** *`metric` (Optional, string | string[])*: Limits the information returned to the specific metrics. Supports a list, such as http,ingest. +** *`flat_settings` (Optional, boolean)*: If true, returns settings in flat format. +** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. + +[discrete] +==== reload_secure_settings +Reload the keystore on nodes in the cluster. + +Secure settings are stored in an on-disk keystore. Certain of these settings are reloadable. +That is, you can change them on disk and reload them without restarting any nodes in the cluster. +When you have updated reloadable secure settings in your keystore, you can use this API to reload those settings on each node. + +When the Elasticsearch keystore is password protected and not simply obfuscated, you must provide the password for the keystore when you reload the secure settings. +Reloading the settings for the whole cluster assumes that the keystores for all nodes are protected with the same password; this method is allowed only when inter-node communications are encrypted. +Alternatively, you can reload the secure settings on each node by locally accessing the API and passing the node-specific Elasticsearch keystore password. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-nodes-reload-secure-settings[Endpoint documentation] +[source,ts] +---- +client.nodes.reloadSecureSettings({ ... }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`node_id` (Optional, string | string[])*: The names of particular nodes in the cluster to target. +** *`secure_settings_password` (Optional, string)*: The password for the Elasticsearch keystore. +** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. +If no response is received before the timeout expires, the request fails and returns an error. + +[discrete] +==== stats +Get node statistics. +Get statistics for nodes in a cluster. +By default, all stats are returned. You can limit the returned information by using metrics. 
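+
+For example, a call that limits the response to a couple of common metric groups might look like the following sketch (the `jvm` and `os` metric names are illustrative):
+[source,ts]
+----
+// Sketch: fetch only JVM and operating-system statistics for all nodes
+const stats = await client.nodes.stats({
+  metric: ['jvm', 'os']
+})
+console.log(stats)
+----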
+ +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-nodes-stats[Endpoint documentation] +[source,ts] +---- +client.nodes.stats({ ... }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`node_id` (Optional, string | string[])*: List of node IDs or names used to limit returned information. +** *`metric` (Optional, string | string[])*: Limit the information returned to the specified metrics +** *`index_metric` (Optional, string | string[])*: Limit the information returned for indices metric to the specific index metrics. It can be used only if indices (or all) metric is specified. +** *`completion_fields` (Optional, string | string[])*: List or wildcard expressions of fields to include in fielddata and suggest statistics. +** *`fielddata_fields` (Optional, string | string[])*: List or wildcard expressions of fields to include in fielddata statistics. +** *`fields` (Optional, string | string[])*: List or wildcard expressions of fields to include in the statistics. +** *`groups` (Optional, boolean)*: List of search groups to include in the search statistics. +** *`include_segment_file_sizes` (Optional, boolean)*: If true, the call reports the aggregated disk usage of each one of the Lucene index files (only applies if segment stats are requested). +** *`level` (Optional, Enum("cluster" | "indices" | "shards"))*: Indicates whether statistics are aggregated at the cluster, index, or shard level. +** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. +** *`types` (Optional, string[])*: A list of document types for the indexing index metric. +** *`include_unloaded_segments` (Optional, boolean)*: If `true`, the response includes information from segments that are not loaded into memory. + +[discrete] +==== usage +Get feature usage information. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-nodes-usage[Endpoint documentation] +[source,ts] +---- +client.nodes.usage({ ... }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`node_id` (Optional, string | string[])*: A list of node IDs or names to limit the returned information; use `_local` to return information from the node you're connecting to, leave empty to get information from all nodes +** *`metric` (Optional, string | string[])*: Limits the information returned to the specific metrics. +A list of the following options: `_all`, `rest_actions`. +** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. +If no response is received before the timeout expires, the request fails and returns an error. + +[discrete] +=== query_rules +[discrete] +==== delete_rule +Delete a query rule. +Delete a query rule within a query ruleset. +This is a destructive action that is only recoverable by re-adding the same rule with the create or update query rule API. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-query-rules-delete-rule[Endpoint documentation] +[source,ts] +---- +client.queryRules.deleteRule({ ruleset_id, rule_id }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`ruleset_id` (string)*: The unique identifier of the query ruleset containing the rule to delete +** *`rule_id` (string)*: The unique identifier of the query rule within the specified ruleset to delete + +[discrete] +==== delete_ruleset +Delete a query ruleset. +Remove a query ruleset and its associated data. 
+This is a destructive action that is not recoverable. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-query-rules-delete-ruleset[Endpoint documentation] +[source,ts] +---- +client.queryRules.deleteRuleset({ ruleset_id }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`ruleset_id` (string)*: The unique identifier of the query ruleset to delete + +[discrete] +==== get_rule +Get a query rule. +Get details about a query rule within a query ruleset. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-query-rules-get-rule[Endpoint documentation] +[source,ts] +---- +client.queryRules.getRule({ ruleset_id, rule_id }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`ruleset_id` (string)*: The unique identifier of the query ruleset containing the rule to retrieve +** *`rule_id` (string)*: The unique identifier of the query rule within the specified ruleset to retrieve + +[discrete] +==== get_ruleset +Get a query ruleset. +Get details about a query ruleset. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-query-rules-get-ruleset[Endpoint documentation] +[source,ts] +---- +client.queryRules.getRuleset({ ruleset_id }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`ruleset_id` (string)*: The unique identifier of the query ruleset + +[discrete] +==== list_rulesets +Get all query rulesets. +Get summarized information about the query rulesets. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-query-rules-list-rulesets[Endpoint documentation] +[source,ts] +---- +client.queryRules.listRulesets({ ... }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`from` (Optional, number)*: The offset from the first result to fetch. +** *`size` (Optional, number)*: The maximum number of results to retrieve. + +[discrete] +==== put_rule +Create or update a query rule. +Create or update a query rule within a query ruleset. + +IMPORTANT: Due to limitations within pinned queries, you can only pin documents using ids or docs, but cannot use both in single rule. +It is advised to use one or the other in query rulesets, to avoid errors. +Additionally, pinned queries have a maximum limit of 100 pinned hits. +If multiple matching rules pin more than 100 documents, only the first 100 documents are pinned in the order they are specified in the ruleset. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-query-rules-put-rule[Endpoint documentation] +[source,ts] +---- +client.queryRules.putRule({ ruleset_id, rule_id, type, criteria, actions }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`ruleset_id` (string)*: The unique identifier of the query ruleset containing the rule to be created or updated. +** *`rule_id` (string)*: The unique identifier of the query rule within the specified ruleset to be created or updated. +** *`type` (Enum("pinned" | "exclude"))*: The type of rule. +** *`criteria` ({ type, metadata, values } | { type, metadata, values }[])*: The criteria that must be met for the rule to be applied. +If multiple criteria are specified for a rule, all criteria must be met for the rule to be applied. +** *`actions` ({ ids, docs })*: The actions to take when the rule is matched. +The format of this action depends on the rule type. +** *`priority` (Optional, number)* + +[discrete] +==== put_ruleset +Create or update a query ruleset. +There is a limit of 100 rules per ruleset. 
+This limit can be increased by using the `xpack.applications.rules.max_rules_per_ruleset` cluster setting. + +IMPORTANT: Due to limitations within pinned queries, you can only select documents using `ids` or `docs`, but cannot use both in single rule. +It is advised to use one or the other in query rulesets, to avoid errors. +Additionally, pinned queries have a maximum limit of 100 pinned hits. +If multiple matching rules pin more than 100 documents, only the first 100 documents are pinned in the order they are specified in the ruleset. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-query-rules-put-ruleset[Endpoint documentation] +[source,ts] +---- +client.queryRules.putRuleset({ ruleset_id, rules }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`ruleset_id` (string)*: The unique identifier of the query ruleset to be created or updated. +** *`rules` ({ rule_id, type, criteria, actions, priority } | { rule_id, type, criteria, actions, priority }[])* + +[discrete] +==== test +Test a query ruleset. +Evaluate match criteria against a query ruleset to identify the rules that would match that criteria. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-query-rules-test[Endpoint documentation] +[source,ts] +---- +client.queryRules.test({ ruleset_id, match_criteria }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`ruleset_id` (string)*: The unique identifier of the query ruleset to be created or updated +** *`match_criteria` (Record)*: The match criteria to apply to rules in the given query ruleset. +Match criteria should match the keys defined in the `criteria.metadata` field of the rule. + +[discrete] +=== rollup +[discrete] +==== delete_job +Delete a rollup job. + +A job must be stopped before it can be deleted. +If you attempt to delete a started job, an error occurs. +Similarly, if you attempt to delete a nonexistent job, an exception occurs. + +IMPORTANT: When you delete a job, you remove only the process that is actively monitoring and rolling up data. +The API does not delete any previously rolled up data. +This is by design; a user may wish to roll up a static data set. +Because the data set is static, after it has been fully rolled up there is no need to keep the indexing rollup job around (as there will be no new data). +Thus the job can be deleted, leaving behind the rolled up data for analysis. +If you wish to also remove the rollup data and the rollup index contains the data for only a single job, you can delete the whole rollup index. +If the rollup index stores data from several jobs, you must issue a delete-by-query that targets the rollup job's identifier in the rollup index. For example: + +---- +POST my_rollup_index/_delete_by_query +{ + "query": { + "term": { + "_rollup.id": "the_rollup_job_id" + } + } +} +---- + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-rollup-delete-job[Endpoint documentation] +[source,ts] +---- +client.rollup.deleteJob({ id }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`id` (string)*: Identifier for the job. + +[discrete] +==== get_jobs +Get rollup job information. +Get the configuration, stats, and status of rollup jobs. + +NOTE: This API returns only active (both `STARTED` and `STOPPED`) jobs. +If a job was created, ran for a while, then was deleted, the API does not return any details about it. +For details about a historical rollup job, the rollup capabilities API may be more useful. 
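+
+A small sketch that lists every active rollup job (the `jobs`, `config`, and `status` fields shown mirror the REST get rollup jobs response):
+[source,ts]
+----
+// Sketch: fetch all rollup jobs and print each job's ID and current state
+const { jobs } = await client.rollup.getJobs({ id: '_all' })
+for (const job of jobs) {
+  console.log(job.config.id, job.status.job_state)
+}
+----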
+ +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-rollup-get-jobs[Endpoint documentation] +[source,ts] +---- +client.rollup.getJobs({ ... }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`id` (Optional, string)*: Identifier for the rollup job. +If it is `_all` or omitted, the API returns all rollup jobs. + +[discrete] +==== get_rollup_caps +Get the rollup job capabilities. +Get the capabilities of any rollup jobs that have been configured for a specific index or index pattern. + +This API is useful because a rollup job is often configured to rollup only a subset of fields from the source index. +Furthermore, only certain aggregations can be configured for various fields, leading to a limited subset of functionality depending on that configuration. +This API enables you to inspect an index and determine: + +. Does this index have associated rollup data somewhere in the cluster? +. If yes to the first question, what fields were rolled up, what aggregations can be performed, and where does the data live? + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-rollup-get-rollup-caps[Endpoint documentation] +[source,ts] +---- +client.rollup.getRollupCaps({ ... }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`id` (Optional, string)*: Index, indices or index-pattern to return rollup capabilities for. +`_all` may be used to fetch rollup capabilities from all jobs. + +[discrete] +==== get_rollup_index_caps +Get the rollup index capabilities. +Get the rollup capabilities of all jobs inside of a rollup index. +A single rollup index may store the data for multiple rollup jobs and may have a variety of capabilities depending on those jobs. This API enables you to determine: + +* What jobs are stored in an index (or indices specified via a pattern)? +* What target indices were rolled up, what fields were used in those rollups, and what aggregations can be performed on each job? + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-rollup-get-rollup-index-caps[Endpoint documentation] +[source,ts] +---- +client.rollup.getRollupIndexCaps({ index }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`index` (string | string[])*: Data stream or index to check for rollup capabilities. +Wildcard (`*`) expressions are supported. + +[discrete] +==== put_job +Create a rollup job. + +WARNING: From 8.15.0, calling this API in a cluster with no rollup usage will fail with a message about the deprecation and planned removal of rollup features. A cluster needs to contain either a rollup job or a rollup index in order for this API to be allowed to run. + +The rollup job configuration contains all the details about how the job should run, when it indexes documents, and what future queries will be able to run against the rollup index. + +There are three main sections to the job configuration: the logistical details about the job (for example, the cron schedule), the fields that are used for grouping, and what metrics to collect for each group. + +Jobs are created in a `STOPPED` state. You can start them with the start rollup jobs API. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-rollup-put-job[Endpoint documentation] +[source,ts] +---- +client.rollup.putJob({ id, cron, groups, index_pattern, page_size, rollup_index }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`id` (string)*: Identifier for the rollup job. 
This can be any alphanumeric string and uniquely identifies the
+data that is associated with the rollup job. The ID is persistent; it is stored with the rolled
+up data. If you create a job, let it run for a while, then delete the job, the data that the job
+rolled up is still associated with this job ID. You cannot create a new job with the same ID
+since that could lead to problems with mismatched job configurations.
+** *`cron` (string)*: A cron string that defines the intervals when the rollup job should be executed. When the interval
+triggers, the indexer attempts to roll up the data in the index pattern. The cron pattern is unrelated
+to the time interval of the data being rolled up. For example, you may wish to create hourly rollups
+of your documents but only run the indexer on a daily basis at midnight, as defined by the cron. The
+cron pattern is defined just like a Watcher cron schedule.
+** *`groups` ({ date_histogram, histogram, terms })*: Defines the grouping fields and aggregations that are defined for this rollup job. These fields will then be
+available later for aggregating into buckets. These aggs and fields can be used in any combination. Think of
+the groups configuration as defining a set of tools that can later be used in aggregations to partition the
+data. Unlike raw data, we have to think ahead to which fields and aggregations might be used. Rollups provide
+enough flexibility that you simply need to determine which fields are needed, not in what order they are needed.
+** *`index_pattern` (string)*: The index or index pattern to roll up. Supports wildcard-style patterns (`logstash-*`). The job attempts to
+roll up the entire index or index pattern.
+** *`page_size` (number)*: The number of bucket results that are processed on each iteration of the rollup indexer. A larger value tends
+to execute faster, but requires more memory during processing. This value has no effect on how the data is
+rolled up; it is merely used for tweaking the speed or memory cost of the indexer.
+** *`rollup_index` (string)*: The index that contains the rollup results. The index can be shared with other rollup jobs. The data is stored so that it doesn’t interfere with unrelated jobs.
+** *`metrics` (Optional, { field, metrics }[])*: Defines the metrics to collect for each grouping tuple. By default, only the doc_counts are collected for each
+group. To make rollup useful, you will often add metrics like averages, mins, maxes, etc. Metrics are defined
+on a per-field basis and for each field you configure which metric should be collected.
+** *`timeout` (Optional, string | -1 | 0)*: Time to wait for the request to complete.
+** *`headers` (Optional, Record)*
+
+[discrete]
+==== rollup_search
+Search rolled-up data.
+The rollup search endpoint is needed because, internally, rolled-up documents utilize a different document structure than the original data.
+It rewrites standard Query DSL into a format that matches the rollup documents, then takes the response and rewrites it back to what a client would expect given the original query.
+
+The request body supports a subset of features from the regular search API.
+The following functionality is not available:
+
+* `size`: Because rollups work on pre-aggregated data, no search hits can be returned and so size must be set to zero or omitted entirely.
+* `highlighter`, `suggesters`, `post_filter`, `profile`, `explain`: These are similarly disallowed.
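+
+With the JavaScript client, these constraints translate into a request like the following sketch, which reuses the `sensor_rollup` index and `temperature` field from the example below and omits `size`:
+[source,ts]
+----
+// Sketch: aggregate rolled-up data; hits cannot be returned, so `size` is omitted
+const result = await client.rollup.rollupSearch({
+  index: 'sensor_rollup',
+  aggregations: {
+    max_temperature: { max: { field: 'temperature' } }
+  }
+})
+----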
+ +**Searching both historical rollup and non-rollup data** + +The rollup search API has the capability to search across both "live" non-rollup data and the aggregated rollup data. +This is done by simply adding the live indices to the URI. For example: + +---- +GET sensor-1,sensor_rollup/_rollup_search +{ + "size": 0, + "aggregations": { + "max_temperature": { + "max": { + "field": "temperature" + } + } + } +} +---- + +The rollup search endpoint does two things when the search runs: + +* The original request is sent to the non-rollup index unaltered. +* A rewritten version of the original request is sent to the rollup index. + +When the two responses are received, the endpoint rewrites the rollup response and merges the two together. +During the merging process, if there is any overlap in buckets between the two responses, the buckets from the non-rollup index are used. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-rollup-rollup-search[Endpoint documentation] +[source,ts] +---- +client.rollup.rollupSearch({ index }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`index` (string | string[])*: A list of data streams and indices used to limit the request. +This parameter has the following rules: + +* At least one data stream, index, or wildcard expression must be specified. This target can include a rollup or non-rollup index. For data streams, the stream's backing indices can only serve as non-rollup indices. Omitting the parameter or using `_all` are not permitted. +* Multiple non-rollup indices may be specified. +* Only one rollup index may be specified. If more than one are supplied, an exception occurs. +* Wildcard expressions (`*`) may be used. If they match more than one rollup index, an exception occurs. However, you can use an expression to match multiple non-rollup indices or data streams. +** *`aggregations` (Optional, Record)*: Specifies aggregations. +** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: Specifies a DSL query that is subject to some limitations. +** *`size` (Optional, number)*: Must be zero if set, as rollups work on pre-aggregated data. +** *`rest_total_hits_as_int` (Optional, boolean)*: Indicates whether hits.total should be rendered as an integer or an object in the rest search response +** *`typed_keys` (Optional, boolean)*: Specify whether aggregation and suggester names should be prefixed by their respective types in the response + +[discrete] +==== start_job +Start rollup jobs. +If you try to start a job that does not exist, an exception occurs. +If you try to start a job that is already started, nothing happens. 
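+
+For example, starting the `sensor` job used in the stop example below is a one-liner:
+[source,ts]
+----
+// Sketch: start a rollup job that is currently in the STOPPED state
+await client.rollup.startJob({ id: 'sensor' })
+----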
+ +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-rollup-start-job[Endpoint documentation] +[source,ts] +---- +client.rollup.startJob({ id }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`id` (string)*: Identifier for the rollup job. + +[discrete] +==== stop_job +Stop rollup jobs. +If you try to stop a job that does not exist, an exception occurs. +If you try to stop a job that is already stopped, nothing happens. + +Since only a stopped job can be deleted, it can be useful to block the API until the indexer has fully stopped. +This is accomplished with the `wait_for_completion` query parameter, and optionally a timeout. For example: + +---- +POST _rollup/job/sensor/_stop?wait_for_completion=true&timeout=10s +---- +The parameter blocks the API call from returning until either the job has moved to STOPPED or the specified time has elapsed. +If the specified time elapses without the job moving to STOPPED, a timeout exception occurs. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-rollup-stop-job[Endpoint documentation] +[source,ts] +---- +client.rollup.stopJob({ id }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`id` (string)*: Identifier for the rollup job. +** *`timeout` (Optional, string | -1 | 0)*: If `wait_for_completion` is `true`, the API blocks for (at maximum) the specified duration while waiting for the job to stop. +If more than `timeout` time has passed, the API throws a timeout exception. +NOTE: Even if a timeout occurs, the stop request is still processing and eventually moves the job to STOPPED. +The timeout simply means the API call itself timed out while waiting for the status change. +** *`wait_for_completion` (Optional, boolean)*: If set to `true`, causes the API to block until the indexer state completely stops. +If set to `false`, the API returns immediately and the indexer is stopped asynchronously in the background. + +[discrete] +=== search_application +[discrete] +==== delete +Delete a search application. + +Remove a search application and its associated alias. Indices attached to the search application are not removed. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-application-delete[Endpoint documentation] +[source,ts] +---- +client.searchApplication.delete({ name }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`name` (string)*: The name of the search application to delete. + +[discrete] +==== delete_behavioral_analytics +Delete a behavioral analytics collection. +The associated data stream is also deleted. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-application-delete-behavioral-analytics[Endpoint documentation] +[source,ts] +---- +client.searchApplication.deleteBehavioralAnalytics({ name }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`name` (string)*: The name of the analytics collection to be deleted + +[discrete] +==== get +Get search application details. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-application-get[Endpoint documentation] +[source,ts] +---- +client.searchApplication.get({ name }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`name` (string)*: The name of the search application + +[discrete] +==== get_behavioral_analytics +Get behavioral analytics collections. 
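+
+For example, fetching a single collection by name (the `my-analytics` name is illustrative; omit `name` to list all collections):
+[source,ts]
+----
+// Sketch: look up one behavioral analytics collection by name
+const collections = await client.searchApplication.getBehavioralAnalytics({ name: ['my-analytics'] })
+----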
+ +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-application-get-behavioral-analytics[Endpoint documentation] +[source,ts] +---- +client.searchApplication.getBehavioralAnalytics({ ... }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`name` (Optional, string[])*: A list of analytics collections to limit the returned information + +[discrete] +==== list +Get search applications. +Get information about search applications. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-application-get-behavioral-analytics[Endpoint documentation] +[source,ts] +---- +client.searchApplication.list({ ... }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`q` (Optional, string)*: Query in the Lucene query string syntax. +** *`from` (Optional, number)*: Starting offset. +** *`size` (Optional, number)*: Specifies a max number of results to get. + +[discrete] +==== post_behavioral_analytics_event +Create a behavioral analytics collection event. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-application-post-behavioral-analytics-event[Endpoint documentation] +[source,ts] +---- +client.searchApplication.postBehavioralAnalyticsEvent({ collection_name, event_type }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`collection_name` (string)*: The name of the behavioral analytics collection. +** *`event_type` (Enum("page_view" | "search" | "search_click"))*: The analytics event type. +** *`payload` (Optional, User-defined value)* +** *`debug` (Optional, boolean)*: Whether the response type has to include more details + +[discrete] +==== put +Create or update a search application. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-application-put[Endpoint documentation] +[source,ts] +---- +client.searchApplication.put({ name }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`name` (string)*: The name of the search application to be created or updated. +** *`search_application` (Optional, { indices, analytics_collection_name, template })* +** *`create` (Optional, boolean)*: If `true`, this request cannot replace or update existing Search Applications. + +[discrete] +==== put_behavioral_analytics +Create a behavioral analytics collection. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-application-put-behavioral-analytics[Endpoint documentation] +[source,ts] +---- +client.searchApplication.putBehavioralAnalytics({ name }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`name` (string)*: The name of the analytics collection to be created or updated. + +[discrete] +==== render_query +Render a search application query. +Generate an Elasticsearch query using the specified query parameters and the search template associated with the search application or a default template if none is specified. +If a parameter used in the search template is not specified in `params`, the parameter's default value will be used. +The API returns the specific Elasticsearch query that would be generated and run by calling the search application search API. + +You must have `read` privileges on the backing alias of the search application. 
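+
+A minimal sketch, assuming a search application named `my-search-app` whose template exposes a `query_string` parameter (both names are illustrative):
+[source,ts]
+----
+// Sketch: inspect the query the search application would run for these parameters
+const rendered = await client.searchApplication.renderQuery({
+  name: 'my-search-app',
+  params: { query_string: 'dark roast coffee' }
+})
+console.log(rendered)
+----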
+
+https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-application-render-query[Endpoint documentation]
+[source,ts]
+----
+client.searchApplication.renderQuery({ name })
+----
+
+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`name` (string)*: The name of the search application to render the query for.
+** *`params` (Optional, Record)*
+
+[discrete]
+==== search
+Run a search application search.
+Generate and run an Elasticsearch query that uses the specified query parameters and the search template associated with the search application or a default template.
+Unspecified template parameters are assigned their default values if applicable.
+
+https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-application-search[Endpoint documentation]
+[source,ts]
+----
+client.searchApplication.search({ name })
+----
+
+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`name` (string)*: The name of the search application to be searched.
+** *`params` (Optional, Record)*: Query parameters specific to this request, which will override any defaults specified in the template.
+** *`typed_keys` (Optional, boolean)*: Determines whether aggregation names are prefixed by their respective types in the response.
+
+[discrete]
+=== searchable_snapshots
+[discrete]
+==== cache_stats
+Get cache statistics.
+Get statistics about the shared cache for partially mounted indices.
+
+https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-searchable-snapshots-cache-stats[Endpoint documentation]
+[source,ts]
+----
+client.searchableSnapshots.cacheStats({ ... })
+----
+
+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`node_id` (Optional, string | string[])*: The names of the nodes in the cluster to target.
+** *`master_timeout` (Optional, string | -1 | 0)*
+
+[discrete]
+==== clear_cache
+Clear the cache.
+Clear indices and data streams from the shared cache for partially mounted indices.
+
+https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-searchable-snapshots-clear-cache[Endpoint documentation]
+[source,ts]
+----
+client.searchableSnapshots.clearCache({ ... })
+----
+
+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`index` (Optional, string | string[])*: A list of data streams, indices, and aliases to clear from the cache.
+It supports wildcards (`*`).
+** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Whether to expand wildcard expressions to concrete indices that are open, closed, or both.
+** *`allow_no_indices` (Optional, boolean)*: Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes the `_all` string or when no indices have been specified.)
+** *`ignore_unavailable` (Optional, boolean)*: Whether specified concrete indices should be ignored when unavailable (missing or closed).
+
+[discrete]
+==== mount
+Mount a snapshot.
+Mount a snapshot as a searchable snapshot index.
+Do not use this API for snapshots managed by index lifecycle management (ILM).
+Manually mounting ILM-managed snapshots can interfere with ILM processes.
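+
+A minimal sketch, assuming a snapshot named `my_snapshot` of the index `my_index` stored in the repository `my_repository` (all names are illustrative; `storage: 'full_copy'` requests a fully mounted index):
+[source,ts]
+----
+// Sketch: mount an index from a snapshot as a searchable snapshot index
+await client.searchableSnapshots.mount({
+  repository: 'my_repository',
+  snapshot: 'my_snapshot',
+  index: 'my_index',
+  renamed_index: 'my_index_mounted',
+  wait_for_completion: true,
+  storage: 'full_copy'
+})
+----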
+ +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-searchable-snapshots-mount[Endpoint documentation] +[source,ts] +---- +client.searchableSnapshots.mount({ repository, snapshot, index }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`repository` (string)*: The name of the repository containing the snapshot of the index to mount. +** *`snapshot` (string)*: The name of the snapshot of the index to mount. +** *`index` (string)*: The name of the index contained in the snapshot whose data is to be mounted. +If no `renamed_index` is specified, this name will also be used to create the new index. +** *`renamed_index` (Optional, string)*: The name of the index that will be created. +** *`index_settings` (Optional, Record)*: The settings that should be added to the index when it is mounted. +** *`ignore_index_settings` (Optional, string[])*: The names of settings that should be removed from the index when it is mounted. +** *`master_timeout` (Optional, string | -1 | 0)*: The period to wait for the master node. +If the master node is not available before the timeout expires, the request fails and returns an error. +To indicate that the request should never timeout, set it to `-1`. +** *`wait_for_completion` (Optional, boolean)*: If true, the request blocks until the operation is complete. +** *`storage` (Optional, string)*: The mount option for the searchable snapshot index. + +[discrete] +==== stats +Get searchable snapshot statistics. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-searchable-snapshots-stats[Endpoint documentation] +[source,ts] +---- +client.searchableSnapshots.stats({ ... }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`index` (Optional, string | string[])*: A list of data streams and indices to retrieve statistics for. +** *`level` (Optional, Enum("cluster" | "indices" | "shards"))*: Return stats aggregated at cluster, index or shard level + +[discrete] +=== security +[discrete] +==== activate_user_profile +Activate a user profile. + +Create or update a user profile on behalf of another user. + +NOTE: The user profile feature is designed only for use by Kibana and Elastic's Observability, Enterprise Search, and Elastic Security solutions. +Individual users and external applications should not call this API directly. +The calling application must have either an `access_token` or a combination of `username` and `password` for the user that the profile document is intended for. +Elastic reserves the right to change or remove this feature in future releases without prior notice. + +This API creates or updates a profile document for end users with information that is extracted from the user's authentication object including `username`, `full_name,` `roles`, and the authentication realm. +For example, in the JWT `access_token` case, the profile user's `username` is extracted from the JWT token claim pointed to by the `claims.principal` setting of the JWT realm that authenticated the token. + +When updating a profile document, the API enables the document if it was disabled. +Any updates do not change existing content for either the `labels` or `data` fields. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-activate-user-profile[Endpoint documentation] +[source,ts] +---- +client.security.activateUserProfile({ grant_type }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`grant_type` (Enum("password" | "access_token"))*: The type of grant. 
+** *`access_token` (Optional, string)*: The user's Elasticsearch access token or JWT. +Both `access` and `id` JWT token types are supported and they depend on the underlying JWT realm configuration. +If you specify the `access_token` grant type, this parameter is required. +It is not valid with other grant types. +** *`password` (Optional, string)*: The user's password. +If you specify the `password` grant type, this parameter is required. +It is not valid with other grant types. +** *`username` (Optional, string)*: The username that identifies the user. +If you specify the `password` grant type, this parameter is required. +It is not valid with other grant types. + +[discrete] +==== authenticate +Authenticate a user. + +Authenticates a user and returns information about the authenticated user. +Include the user information in a [basic auth header](https://en.wikipedia.org/wiki/Basic_access_authentication). +A successful call returns a JSON structure that shows user information such as their username, the roles that are assigned to the user, any assigned metadata, and information about the realms that authenticated and authorized the user. +If the user cannot be authenticated, this API returns a 401 status code. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-authenticate[Endpoint documentation] +[source,ts] +---- +client.security.authenticate() +---- + + +[discrete] +==== bulk_delete_role +Bulk delete roles. + +The role management APIs are generally the preferred way to manage roles, rather than using file-based role management. +The bulk delete roles API cannot delete roles that are defined in roles files. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-bulk-delete-role[Endpoint documentation] +[source,ts] +---- +client.security.bulkDeleteRole({ names }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`names` (string[])*: An array of role names to delete +** *`refresh` (Optional, Enum(true | false | "wait_for"))*: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. + +[discrete] +==== bulk_put_role +Bulk create or update roles. + +The role management APIs are generally the preferred way to manage roles, rather than using file-based role management. +The bulk create or update roles API cannot update roles that are defined in roles files. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-bulk-put-role[Endpoint documentation] +[source,ts] +---- +client.security.bulkPutRole({ roles }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`roles` (Record)*: A dictionary of role name to RoleDescriptor objects to add or update +** *`refresh` (Optional, Enum(true | false | "wait_for"))*: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. + +[discrete] +==== bulk_update_api_keys +Bulk update API keys. +Update the attributes for multiple API keys. + +IMPORTANT: It is not possible to use an API key as the authentication credential for this API. To update API keys, the owner user's credentials are required. + +This API is similar to the update API key API but enables you to apply the same update to multiple API keys in one API call. 
This operation can greatly improve performance over making individual updates. + +It is not possible to update expired or invalidated API keys. + +This API supports updates to API key access scope, metadata and expiration. +The access scope of each API key is derived from the `role_descriptors` you specify in the request and a snapshot of the owner user's permissions at the time of the request. +The snapshot of the owner's permissions is updated automatically on every call. + +IMPORTANT: If you don't specify `role_descriptors` in the request, a call to this API might still change an API key's access scope. This change can occur if the owner user's permissions have changed since the API key was created or last modified. + +A successful request returns a JSON structure that contains the IDs of all updated API keys, the IDs of API keys that already had the requested changes and did not require an update, and error details for any failed update. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-bulk-update-api-keys[Endpoint documentation] +[source,ts] +---- +client.security.bulkUpdateApiKeys({ ids }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`ids` (string | string[])*: The API key identifiers. +** *`expiration` (Optional, string | -1 | 0)*: Expiration time for the API keys. +By default, API keys never expire. +This property can be omitted to leave the value unchanged. +** *`metadata` (Optional, Record)*: Arbitrary nested metadata to associate with the API keys. +Within the `metadata` object, top-level keys beginning with an underscore (`_`) are reserved for system usage. +Any information specified with this parameter fully replaces metadata previously associated with the API key. +** *`role_descriptors` (Optional, Record)*: The role descriptors to assign to the API keys. +An API key's effective permissions are an intersection of its assigned privileges and the point-in-time snapshot of permissions of the owner user. +You can assign new privileges by specifying them in this parameter. +To remove assigned privileges, supply the `role_descriptors` parameter as an empty object `{}`. +If an API key has no assigned privileges, it inherits the owner user's full permissions. +The snapshot of the owner's permissions is always updated, whether you supply the `role_descriptors` parameter. +The structure of a role descriptor is the same as the request for the create API keys API. + +[discrete] +==== change_password +Change passwords. + +Change the passwords of users in the native realm and built-in users. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-change-password[Endpoint documentation] +[source,ts] +---- +client.security.changePassword({ ... }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`username` (Optional, string)*: The user whose password you want to change. If you do not specify this +parameter, the password is changed for the current user. +** *`password` (Optional, string)*: The new password value. Passwords must be at least 6 characters long. +** *`password_hash` (Optional, string)*: A hash of the new password value. This must be produced using the same +hashing algorithm as has been configured for password storage. For more details, +see the explanation of the `xpack.security.authc.password_hashing.algorithm` +setting. 
+** *`refresh` (Optional, Enum(true | false | "wait_for"))*: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes.
+
+[discrete]
+==== clear_api_key_cache
+Clear the API key cache.
+
+Evict a subset of all entries from the API key cache.
+The cache is also automatically cleared on state changes of the security index.
+
+https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-clear-api-key-cache[Endpoint documentation]
+[source,ts]
+----
+client.security.clearApiKeyCache({ ids })
+----
+
+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`ids` (string | string[])*: List of API key IDs to evict from the API key cache.
+To evict all API keys, use `*`.
+Does not support other wildcard patterns.
+
+[discrete]
+==== clear_cached_privileges
+Clear the privileges cache.
+
+Evict privileges from the native application privilege cache.
+The cache is also automatically cleared for applications that have their privileges updated.
+
+https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-clear-cached-privileges[Endpoint documentation]
+[source,ts]
+----
+client.security.clearCachedPrivileges({ application })
+----
+
+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`application` (string)*: A list of applications.
+To clear all applications, use an asterisk (`*`).
+It does not support other wildcard patterns.
+
+[discrete]
+==== clear_cached_realms
+Clear the user cache.
+
+Evict users from the user cache.
+You can completely clear the cache or evict specific users.
+
+User credentials are cached in memory on each node to avoid connecting to a remote authentication service or hitting the disk for every incoming request.
+There are realm settings that you can use to configure the user cache.
+For more information, refer to the documentation about controlling the user cache.
+
+https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-clear-cached-realms[Endpoint documentation]
+[source,ts]
+----
+client.security.clearCachedRealms({ realms })
+----
+
+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`realms` (string | string[])*: A list of realms.
+To clear all realms, use an asterisk (`*`).
+It does not support other wildcard patterns.
+** *`usernames` (Optional, string[])*: A list of the users to clear from the cache.
+If you do not specify this parameter, the API evicts all users from the user cache.
+
+[discrete]
+==== clear_cached_roles
+Clear the roles cache.
+
+Evict roles from the native role cache.
+
+https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-clear-cached-roles[Endpoint documentation]
+[source,ts]
+----
+client.security.clearCachedRoles({ name })
+----
+
+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`name` (string | string[])*: A list of roles to evict from the role cache.
+To evict all roles, use an asterisk (`*`).
+It does not support other wildcard patterns.
+
+[discrete]
+==== clear_cached_service_tokens
+Clear service account token caches.
+
+Evict a subset of all entries from the service account token caches.
+Two separate caches exist for service account tokens: one cache for tokens backed by the `service_tokens` file, and another for tokens backed by the `.security` index.
+This API clears matching entries from both caches.
+ +The cache for service account tokens backed by the `.security` index is cleared automatically on state changes of the security index. +The cache for tokens backed by the `service_tokens` file is cleared automatically on file changes. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-clear-cached-service-tokens[Endpoint documentation] +[source,ts] +---- +client.security.clearCachedServiceTokens({ namespace, service, name }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`namespace` (string)*: The namespace, which is a top-level grouping of service accounts. +** *`service` (string)*: The name of the service, which must be unique within its namespace. +** *`name` (string | string[])*: A list of token names to evict from the service account token caches. +Use a wildcard (`*`) to evict all tokens that belong to a service account. +It does not support other wildcard patterns. + +[discrete] +==== create_api_key +Create an API key. + +Create an API key for access without requiring basic authentication. + +IMPORTANT: If the credential that is used to authenticate this request is an API key, the derived API key cannot have any privileges. +If you specify privileges, the API returns an error. + +A successful request returns a JSON structure that contains the API key, its unique ID, and its name. +If applicable, it also returns expiration information for the API key in milliseconds. + +NOTE: By default, API keys never expire. You can specify expiration information when you create the API keys. + +The API keys are created by the Elasticsearch API key service, which is automatically enabled. +To configure or turn off the API key service, refer to API key service setting documentation. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-create-api-key[Endpoint documentation] +[source,ts] +---- +client.security.createApiKey({ ... }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`expiration` (Optional, string | -1 | 0)*: The expiration time for the API key. +By default, API keys never expire. +** *`name` (Optional, string)*: A name for the API key. +** *`role_descriptors` (Optional, Record)*: An array of role descriptors for this API key. +When it is not specified or it is an empty array, the API key will have a point-in-time snapshot of permissions of the authenticated user. +If you supply role descriptors, the resultant permissions are an intersection of the API key's permissions and the authenticated user's permissions, thereby limiting the access scope of the API key. +The structure of a role descriptor is the same as the request for the create role API. +For more details, refer to the create or update roles API. + +NOTE: Due to the way in which this permission intersection is calculated, it is not possible to create an API key that is a child of another API key, unless the derived key is created without any privileges. +In this case, you must explicitly specify a role descriptor with no privileges. +The derived API key can be used for authentication; it will not have authority to call Elasticsearch APIs. +** *`metadata` (Optional, Record)*: Arbitrary metadata that you want to associate with the API key. It supports a nested data structure. Within the metadata object, keys beginning with `_` are reserved for system usage.
+** *`refresh` (Optional, Enum(true | false | "wait_for"))*: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. + +[discrete] +==== create_cross_cluster_api_key +Create a cross-cluster API key. + +Create an API key of the `cross_cluster` type for API key based remote cluster access. +A `cross_cluster` API key cannot be used to authenticate through the REST interface. + +IMPORTANT: To authenticate this request you must use a credential that is not an API key. Even if you use an API key that has the required privilege, the API returns an error. + +Cross-cluster API keys are created by the Elasticsearch API key service, which is automatically enabled. + +NOTE: Unlike REST API keys, a cross-cluster API key does not capture permissions of the authenticated user. The API key’s effective permission is exactly as specified with the `access` property. + +A successful request returns a JSON structure that contains the API key, its unique ID, and its name. If applicable, it also returns expiration information for the API key in milliseconds. + +By default, API keys never expire. You can specify expiration information when you create the API keys. + +Cross-cluster API keys can only be updated with the update cross-cluster API key API. +Attempting to update them with the update REST API key API or the bulk update REST API keys API will result in an error. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-create-cross-cluster-api-key[Endpoint documentation] +[source,ts] +---- +client.security.createCrossClusterApiKey({ access, name }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`access` ({ replication, search })*: The access to be granted to this API key. +The access is composed of permissions for cross-cluster search and cross-cluster replication. +At least one of them must be specified. + +NOTE: No explicit privileges should be specified for either search or replication access. +The creation process automatically converts the access specification to a role descriptor which has relevant privileges assigned accordingly. +** *`name` (string)*: Specifies the name for this API key. +** *`expiration` (Optional, string | -1 | 0)*: Expiration time for the API key. +By default, API keys never expire. +** *`metadata` (Optional, Record)*: Arbitrary metadata that you want to associate with the API key. +It supports a nested data structure. +Within the metadata object, keys beginning with `_` are reserved for system usage. + +[discrete] +==== create_service_token +Create a service account token. + +Create a service account token for access without requiring basic authentication. + +NOTE: Service account tokens never expire. +You must actively delete them if they are no longer needed. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-create-service-token[Endpoint documentation] +[source,ts] +---- +client.security.createServiceToken({ namespace, service }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`namespace` (string)*: The name of the namespace, which is a top-level grouping of service accounts. +** *`service` (string)*: The name of the service. +** *`name` (Optional, string)*: The name for the service account token. +If omitted, a random name will be generated. + +Token names must be at least 1 and no more than 256 characters.
+They can contain alphanumeric characters (a-z, A-Z, 0-9), dashes (`-`), and underscores (`_`), but cannot begin with an underscore. + +NOTE: Token names must be unique in the context of the associated service account. +They must also be globally unique with their fully qualified names, which are composed of the service account principal and token name, such as `//`. +** *`refresh` (Optional, Enum(true | false | "wait_for"))*: If `true` then refresh the affected shards to make this operation visible to search, if `wait_for` (the default) then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. + +[discrete] +==== delegate_pki +Delegate PKI authentication. + +This API implements the exchange of an X509Certificate chain for an Elasticsearch access token. +The certificate chain is validated, according to RFC 5280, by sequentially considering the trust configuration of every installed PKI realm that has `delegation.enabled` set to `true`. +A successfully trusted client certificate is also subject to the validation of the subject distinguished name according to the `username_pattern` of the respective realm. + +This API is called by smart and trusted proxies, such as Kibana, which terminate the user's TLS session but still want to authenticate the user by using a PKI realm, as if the user connected directly to Elasticsearch. + +IMPORTANT: The association between the subject public key in the target certificate and the corresponding private key is not validated. +This is part of the TLS authentication process and it is delegated to the proxy that calls this API. +The proxy is trusted to have performed the TLS authentication and this API translates that authentication into an Elasticsearch access token. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-delegate-pki[Endpoint documentation] +[source,ts] +---- +client.security.delegatePki({ x509_certificate_chain }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`x509_certificate_chain` (string[])*: The X509Certificate chain, which is represented as an ordered string array. +Each string in the array is a base64-encoded representation (Section 4 of RFC 4648, not base64url-encoded) of the certificate's DER encoding. + +The first element is the target certificate that contains the subject distinguished name that is requesting access. +This may be followed by additional certificates; each subsequent certificate is used to certify the previous one. + +[discrete] +==== delete_privileges +Delete application privileges. + +To use this API, you must have one of the following privileges: + +* The `manage_security` cluster privilege (or a greater privilege such as `all`). +* The "Manage Application Privileges" global privilege for the application being referenced in the request. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-delete-privileges[Endpoint documentation] +[source,ts] +---- +client.security.deletePrivileges({ application, name }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`application` (string)*: The name of the application. +Application privileges are always associated with exactly one application. +** *`name` (string | string[])*: The name of the privilege.
+** *`refresh` (Optional, Enum(true | false | "wait_for"))*: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. + +[discrete] +==== delete_role +Delete roles. + +Delete roles in the native realm. +The role management APIs are generally the preferred way to manage roles, rather than using file-based role management. +The delete roles API cannot remove roles that are defined in roles files. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-delete-role[Endpoint documentation] +[source,ts] +---- +client.security.deleteRole({ name }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`name` (string)*: The name of the role. +** *`refresh` (Optional, Enum(true | false | "wait_for"))*: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. + +[discrete] +==== delete_role_mapping +Delete role mappings. + +Role mappings define which roles are assigned to each user. +The role mapping APIs are generally the preferred way to manage role mappings rather than using role mapping files. +The delete role mappings API cannot remove role mappings that are defined in role mapping files. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-delete-role-mapping[Endpoint documentation] +[source,ts] +---- +client.security.deleteRoleMapping({ name }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`name` (string)*: The distinct name that identifies the role mapping. +The name is used solely as an identifier to facilitate interaction via the API; it does not affect the behavior of the mapping in any way. +** *`refresh` (Optional, Enum(true | false | "wait_for"))*: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. + +[discrete] +==== delete_service_token +Delete service account tokens. + +Delete service account tokens for a service in a specified namespace. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-delete-service-token[Endpoint documentation] +[source,ts] +---- +client.security.deleteServiceToken({ namespace, service, name }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`namespace` (string)*: The namespace, which is a top-level grouping of service accounts. +** *`service` (string)*: The service name. +** *`name` (string)*: The name of the service account token. +** *`refresh` (Optional, Enum(true | false | "wait_for"))*: If `true` then refresh the affected shards to make this operation visible to search, if `wait_for` (the default) then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. + +[discrete] +==== delete_user +Delete users. + +Delete users from the native realm. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-delete-user[Endpoint documentation] +[source,ts] +---- +client.security.deleteUser({ username }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`username` (string)*: An identifier for the user. 
+** *`refresh` (Optional, Enum(true | false | "wait_for"))*: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. + +[discrete] +==== disable_user +Disable users. + +Disable users in the native realm. +By default, when you create users, they are enabled. +You can use this API to revoke a user's access to Elasticsearch. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-disable-user[Endpoint documentation] +[source,ts] +---- +client.security.disableUser({ username }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`username` (string)*: An identifier for the user. +** *`refresh` (Optional, Enum(true | false | "wait_for"))*: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. + +[discrete] +==== disable_user_profile +Disable a user profile. + +Disable user profiles so that they are not visible in user profile searches. + +NOTE: The user profile feature is designed only for use by Kibana and Elastic's Observability, Enterprise Search, and Elastic Security solutions. +Individual users and external applications should not call this API directly. +Elastic reserves the right to change or remove this feature in future releases without prior notice. + +When you activate a user profile, it's automatically enabled and visible in user profile searches. You can use the disable user profile API to disable a user profile so it’s not visible in these searches. +To re-enable a disabled user profile, use the enable user profile API. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-disable-user-profile[Endpoint documentation] +[source,ts] +---- +client.security.disableUserProfile({ uid }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`uid` (string)*: Unique identifier for the user profile. +** *`refresh` (Optional, Enum(true | false | "wait_for"))*: If 'true', Elasticsearch refreshes the affected shards to make this operation visible to search. +If 'wait_for', it waits for a refresh to make this operation visible to search. +If 'false', it does nothing with refreshes. + +[discrete] +==== enable_user +Enable users. + +Enable users in the native realm. +By default, when you create users, they are enabled. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-enable-user[Endpoint documentation] +[source,ts] +---- +client.security.enableUser({ username }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`username` (string)*: An identifier for the user. +** *`refresh` (Optional, Enum(true | false | "wait_for"))*: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. + +[discrete] +==== enable_user_profile +Enable a user profile. + +Enable user profiles to make them visible in user profile searches. + +NOTE: The user profile feature is designed only for use by Kibana and Elastic's Observability, Enterprise Search, and Elastic Security solutions. +Individual users and external applications should not call this API directly.
+Elastic reserves the right to change or remove this feature in future releases without prior notice. + +When you activate a user profile, it's automatically enabled and visible in user profile searches. +If you later disable the user profile, you can use the enable user profile API to make the profile visible in these searches again. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-enable-user-profile[Endpoint documentation] +[source,ts] +---- +client.security.enableUserProfile({ uid }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`uid` (string)*: A unique identifier for the user profile. +** *`refresh` (Optional, Enum(true | false | "wait_for"))*: If 'true', Elasticsearch refreshes the affected shards to make this operation +visible to search. +If 'wait_for', it waits for a refresh to make this operation visible to search. +If 'false', nothing is done with refreshes. + +[discrete] +==== enroll_kibana +Enroll Kibana. + +Enable a Kibana instance to configure itself for communication with a secured Elasticsearch cluster. + +NOTE: This API is currently intended for internal use only by Kibana. +Kibana uses this API internally to configure itself for communications with an Elasticsearch cluster that already has security features enabled. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-enroll-kibana[Endpoint documentation] +[source,ts] +---- +client.security.enrollKibana() +---- + + +[discrete] +==== enroll_node +Enroll a node. + +Enroll a new node to allow it to join an existing cluster with security features enabled. + +The response contains all the necessary information for the joining node to bootstrap discovery and security related settings so that it can successfully join the cluster. +The response contains key and certificate material that allows the caller to generate valid signed certificates for the HTTP layer of all nodes in the cluster. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-enroll-node[Endpoint documentation] +[source,ts] +---- +client.security.enrollNode() +---- + + +[discrete] +==== get_api_key +Get API key information. + +Retrieves information for one or more API keys. +NOTE: If you have only the `manage_own_api_key` privilege, this API returns only the API keys that you own. +If you have `read_security`, `manage_api_key` or greater privileges (including `manage_security`), this API returns all API keys regardless of ownership. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-get-api-key[Endpoint documentation] +[source,ts] +---- +client.security.getApiKey({ ... }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`id` (Optional, string)*: An API key id. +This parameter cannot be used with any of `name`, `realm_name` or `username`. +** *`name` (Optional, string)*: An API key name. +This parameter cannot be used with any of `id`, `realm_name` or `username`. +It supports prefix search with wildcard. +** *`owner` (Optional, boolean)*: A boolean flag that can be used to query API keys owned by the currently authenticated user. +The `realm_name` or `username` parameters cannot be specified when this parameter is set to `true` as they are assumed to be the currently authenticated ones. +** *`realm_name` (Optional, string)*: The name of an authentication realm. +This parameter cannot be used with either `id` or `name` or when `owner` flag is set to `true`. 
+** *`username` (Optional, string)*: The username of a user. +This parameter cannot be used with either `id` or `name` or when `owner` flag is set to `true`. +** *`with_limited_by` (Optional, boolean)*: Return the snapshot of the owner user's role descriptors +associated with the API key. An API key's actual +permission is the intersection of its assigned role +descriptors and the owner user's role descriptors. +** *`active_only` (Optional, boolean)*: A boolean flag that can be used to query API keys that are currently active. An API key is considered active if it is neither invalidated, nor expired at query time. You can specify this together with other parameters such as `owner` or `name`. If `active_only` is false, the response will include both active and inactive (expired or invalidated) keys. +** *`with_profile_uid` (Optional, boolean)*: Determines whether to also retrieve the profile uid, for the API key owner principal, if it exists. + +[discrete] +==== get_builtin_privileges +Get builtin privileges. + +Get the list of cluster privileges and index privileges that are available in this version of Elasticsearch. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-get-builtin-privileges[Endpoint documentation] +[source,ts] +---- +client.security.getBuiltinPrivileges() +---- + + +[discrete] +==== get_privileges +Get application privileges. + +To use this API, you must have one of the following privileges: + +* The `read_security` cluster privilege (or a greater privilege such as `manage_security` or `all`). +* The "Manage Application Privileges" global privilege for the application being referenced in the request. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-get-privileges[Endpoint documentation] +[source,ts] +---- +client.security.getPrivileges({ ... }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`application` (Optional, string)*: The name of the application. +Application privileges are always associated with exactly one application. +If you do not specify this parameter, the API returns information about all privileges for all applications. +** *`name` (Optional, string | string[])*: The name of the privilege. +If you do not specify this parameter, the API returns information about all privileges for the requested application. + +[discrete] +==== get_role +Get roles. + +Get roles in the native realm. +The role management APIs are generally the preferred way to manage roles, rather than using file-based role management. +The get roles API cannot retrieve roles that are defined in roles files. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-get-role[Endpoint documentation] +[source,ts] +---- +client.security.getRole({ ... }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`name` (Optional, string | string[])*: The name of the role. +You can specify multiple roles as a list. +If you do not specify this parameter, the API returns information about all roles. + +[discrete] +==== get_role_mapping +Get role mappings. + +Role mappings define which roles are assigned to each user. +The role mapping APIs are generally the preferred way to manage role mappings rather than using role mapping files. +The get role mappings API cannot retrieve role mappings that are defined in role mapping files. 
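+
+For example, a minimal retrieval sketch (the mapping name `kibana_admins` is an assumed example value; omit `name` to return all role mappings):
+[source,ts]
+----
+// Sketch only: fetches one role mapping by its distinct name.
+const mappings = await client.security.getRoleMapping({ name: 'kibana_admins' })
+console.log(mappings)
+----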
+ +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-get-role-mapping[Endpoint documentation] +[source,ts] +---- +client.security.getRoleMapping({ ... }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`name` (Optional, string | string[])*: The distinct name that identifies the role mapping. The name is used solely as an identifier to facilitate interaction via the API; it does not affect the behavior of the mapping in any way. You can specify multiple mapping names as a list. If you do not specify this parameter, the API returns information about all role mappings. + +[discrete] +==== get_service_accounts +Get service accounts. + +Get a list of service accounts that match the provided path parameters. + +NOTE: Currently, only the `elastic/fleet-server` service account is available. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-get-service-accounts[Endpoint documentation] +[source,ts] +---- +client.security.getServiceAccounts({ ... }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`namespace` (Optional, string)*: The name of the namespace. +Omit this parameter to retrieve information about all service accounts. +If you omit this parameter, you must also omit the `service` parameter. +** *`service` (Optional, string)*: The service name. +Omit this parameter to retrieve information about all service accounts that belong to the specified `namespace`. + +[discrete] +==== get_service_credentials +Get service account credentials. + +To use this API, you must have at least the `read_security` cluster privilege (or a greater privilege such as `manage_service_account` or `manage_security`). + +The response includes service account tokens that were created with the create service account tokens API as well as file-backed tokens from all nodes of the cluster. + +NOTE: For tokens backed by the `service_tokens` file, the API collects them from all nodes of the cluster. +Tokens with the same name from different nodes are assumed to be the same token and are only counted once towards the total number of service tokens. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-get-service-credentials[Endpoint documentation] +[source,ts] +---- +client.security.getServiceCredentials({ namespace, service }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`namespace` (string)*: The name of the namespace. +** *`service` (string)*: The service name. + +[discrete] +==== get_settings +Get security index settings. + +Get the user-configurable settings for the security internal index (`.security` and associated indices). +Only a subset of the index settings — those that are user-configurable—will be shown. +This includes: + +* `index.auto_expand_replicas` +* `index.number_of_replicas` + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-get-settings[Endpoint documentation] +[source,ts] +---- +client.security.getSettings({ ... }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. + +[discrete] +==== get_token +Get a token. + +Create a bearer token for access without requiring basic authentication. +The tokens are created by the Elasticsearch Token Service, which is automatically enabled when you configure TLS on the HTTP interface. 
+Alternatively, you can explicitly enable the `xpack.security.authc.token.enabled` setting. +When you are running in production mode, a bootstrap check prevents you from enabling the token service unless you also enable TLS on the HTTP interface. + +The get token API takes the same parameters as a typical OAuth 2.0 token API except for the use of a JSON request body. + +A successful get token API call returns a JSON structure that contains the access token, the amount of time (in seconds) until the token expires, the type, and the scope if available. + +The tokens returned by the get token API are valid for a finite period of time; after that time period, they can no longer be used. +That time period is defined by the `xpack.security.authc.token.timeout` setting. +If you want to invalidate a token immediately, you can do so by using the invalidate token API. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-get-token[Endpoint documentation] +[source,ts] +---- +client.security.getToken({ ... }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`grant_type` (Optional, Enum("password" | "client_credentials" | "_kerberos" | "refresh_token"))*: The type of grant. +Supported grant types are: `password`, `_kerberos`, `client_credentials`, and `refresh_token`. +** *`scope` (Optional, string)*: The scope of the token. +Currently, tokens are only issued for a scope of FULL regardless of the value sent with the request. +** *`password` (Optional, string)*: The user's password. +If you specify the `password` grant type, this parameter is required. +This parameter is not valid with any other supported grant type. +** *`kerberos_ticket` (Optional, string)*: The base64-encoded Kerberos ticket. +If you specify the `_kerberos` grant type, this parameter is required. +This parameter is not valid with any other supported grant type. +** *`refresh_token` (Optional, string)*: The string that was returned when you created the token, which enables you to extend its life. +If you specify the `refresh_token` grant type, this parameter is required. +This parameter is not valid with any other supported grant type. +** *`username` (Optional, string)*: The username that identifies the user. +If you specify the `password` grant type, this parameter is required. +This parameter is not valid with any other supported grant type. + +[discrete] +==== get_user +Get users. + +Get information about users in the native realm and built-in users. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-get-user[Endpoint documentation] +[source,ts] +---- +client.security.getUser({ ... }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`username` (Optional, string | string[])*: An identifier for the user. You can specify multiple usernames as a list. If you omit this parameter, the API retrieves information about all users. +** *`with_profile_uid` (Optional, boolean)*: Determines whether to retrieve the user profile UID, if it exists, for the users. + +[discrete] +==== get_user_privileges +Get user privileges. + +Get the security privileges for the logged-in user. +All users can use this API, but only to determine their own privileges. +To check the privileges of other users, you must use the run as feature. +To check whether a user has a specific list of privileges, use the has privileges API.
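+
+For example, a minimal sketch that retrieves the calling user's own privileges (no parameters are required):
+[source,ts]
+----
+// Sketch only: returns the privileges held by the currently authenticated user.
+const privileges = await client.security.getUserPrivileges()
+console.log(privileges)
+----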
+ +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-get-user-privileges[Endpoint documentation] +[source,ts] +---- +client.security.getUserPrivileges({ ... }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`application` (Optional, string)*: The name of the application. Application privileges are always associated with exactly one application. If you do not specify this parameter, the API returns information about all privileges for all applications. +** *`priviledge` (Optional, string)*: The name of the privilege. If you do not specify this parameter, the API returns information about all privileges for the requested application. +** *`username` (Optional, string | null)* + +[discrete] +==== get_user_profile +Get a user profile. + +Get a user's profile using the unique profile ID. + +NOTE: The user profile feature is designed only for use by Kibana and Elastic's Observability, Enterprise Search, and Elastic Security solutions. +Individual users and external applications should not call this API directly. +Elastic reserves the right to change or remove this feature in future releases without prior notice. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-get-user-profile[Endpoint documentation] +[source,ts] +---- +client.security.getUserProfile({ uid }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`uid` (string | string[])*: A unique identifier for the user profile. +** *`data` (Optional, string | string[])*: A list of filters for the `data` field of the profile document. +To return all content, use `data=*`. +To return a subset of content, use `data=` to retrieve content nested under the specified ``. +By default, the API returns no `data` content. + +[discrete] +==== grant_api_key +Grant an API key. + +Create an API key on behalf of another user. +This API is similar to the create API keys API; however, it creates the API key for a user that is different from the user that runs the API. +The caller must have authentication credentials for the user on whose behalf the API key will be created. +It is not possible to use this API to create an API key without that user's credentials. +The supported user authentication credential types are: + +* username and password +* Elasticsearch access tokens +* JWTs + +The user for whom the authentication credentials are provided can optionally "run as" (impersonate) another user. +In this case, the API key will be created on behalf of the impersonated user. + +This API is intended to be used by applications that need to create and manage API keys for end users, but cannot guarantee that those users have permission to create API keys on their own behalf. +The API keys are created by the Elasticsearch API key service, which is automatically enabled. + +A successful grant API key API call returns a JSON structure that contains the API key, its unique ID, and its name. +If applicable, it also returns expiration information for the API key in milliseconds. + +By default, API keys never expire. You can specify expiration information when you create the API keys. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-grant-api-key[Endpoint documentation] +[source,ts] +---- +client.security.grantApiKey({ api_key, grant_type }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`api_key` ({ name, expiration, role_descriptors, metadata })*: The API key. +** *`grant_type` (Enum("access_token" | "password"))*: The type of grant.
Supported grant types are: `access_token`, `password`. +** *`access_token` (Optional, string)*: The user's access token. +If you specify the `access_token` grant type, this parameter is required. +It is not valid with other grant types. +** *`username` (Optional, string)*: The user name that identifies the user. +If you specify the `password` grant type, this parameter is required. +It is not valid with other grant types. +** *`password` (Optional, string)*: The user's password. +If you specify the `password` grant type, this parameter is required. +It is not valid with other grant types. +** *`run_as` (Optional, string)*: The name of the user to be impersonated. + +[discrete] +==== has_privileges +Check user privileges. + +Determine whether the specified user has a specified list of privileges. +All users can use this API, but only to determine their own privileges. +To check the privileges of other users, you must use the run as feature. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-has-privileges[Endpoint documentation] +[source,ts] +---- +client.security.hasPrivileges({ ... }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`user` (Optional, string)*: Username +** *`application` (Optional, { application, privileges, resources }[])* +** *`cluster` (Optional, Enum("all" | "cancel_task" | "create_snapshot" | "cross_cluster_replication" | "cross_cluster_search" | "delegate_pki" | "grant_api_key" | "manage" | "manage_api_key" | "manage_autoscaling" | "manage_behavioral_analytics" | "manage_ccr" | "manage_data_frame_transforms" | "manage_data_stream_global_retention" | "manage_enrich" | "manage_ilm" | "manage_index_templates" | "manage_inference" | "manage_ingest_pipelines" | "manage_logstash_pipelines" | "manage_ml" | "manage_oidc" | "manage_own_api_key" | "manage_pipeline" | "manage_rollup" | "manage_saml" | "manage_search_application" | "manage_search_query_rules" | "manage_search_synonyms" | "manage_security" | "manage_service_account" | "manage_slm" | "manage_token" | "manage_transform" | "manage_user_profile" | "manage_watcher" | "monitor" | "monitor_data_frame_transforms" | "monitor_data_stream_global_retention" | "monitor_enrich" | "monitor_inference" | "monitor_ml" | "monitor_rollup" | "monitor_snapshot" | "monitor_stats" | "monitor_text_structure" | "monitor_transform" | "monitor_watcher" | "none" | "post_behavioral_analytics_event" | "read_ccr" | "read_fleet_secrets" | "read_ilm" | "read_pipeline" | "read_security" | "read_slm" | "transport_client" | "write_connector_secrets" | "write_fleet_secrets")[])*: A list of the cluster privileges that you want to check. +** *`index` (Optional, { names, privileges, allow_restricted_indices }[])* + +[discrete] +==== has_privileges_user_profile +Check user profile privileges. + +Determine whether the users associated with the specified user profile IDs have all the requested privileges. + +NOTE: The user profile feature is designed only for use by Kibana and Elastic's Observability, Enterprise Search, and Elastic Security solutions. Individual users and external applications should not call this API directly. +Elastic reserves the right to change or remove this feature in future releases without prior notice. 
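+
+A minimal sketch, assuming a placeholder profile UID and a simple cluster-privilege check:
+[source,ts]
+----
+// Sketch only: checks whether the user behind the given profile ID has
+// the `monitor` cluster privilege. The UID below is an assumed placeholder.
+const response = await client.security.hasPrivilegesUserProfile({
+  uids: ['u_example_profile_uid'],
+  privileges: {
+    cluster: ['monitor']
+  }
+})
+console.log(response)
+----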
+ +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-has-privileges-user-profile[Endpoint documentation] +[source,ts] +---- +client.security.hasPrivilegesUserProfile({ uids, privileges }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`uids` (string[])*: A list of profile IDs. The privileges are checked for associated users of the profiles. +** *`privileges` ({ application, cluster, index })*: An object containing all the privileges to be checked. + +[discrete] +==== invalidate_api_key +Invalidate API keys. + +This API invalidates API keys created by the create API key or grant API key APIs. +Invalidated API keys fail authentication, but they can still be viewed using the get API key information and query API key information APIs, for at least the configured retention period, until they are automatically deleted. + +To use this API, you must have at least the `manage_security`, `manage_api_key`, or `manage_own_api_key` cluster privileges. +The `manage_security` privilege allows deleting any API key, including both REST and cross cluster API keys. +The `manage_api_key` privilege allows deleting any REST API key, but not cross cluster API keys. +The `manage_own_api_key` only allows deleting REST API keys that are owned by the user. +In addition, with the `manage_own_api_key` privilege, an invalidation request must be issued in one of the three formats: + +- Set the parameter `owner=true`. +- Or, set both `username` and `realm_name` to match the user's identity. +- Or, if the request is issued by an API key, that is to say an API key invalidates itself, specify its ID in the `ids` field. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-invalidate-api-key[Endpoint documentation] +[source,ts] +---- +client.security.invalidateApiKey({ ... }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`id` (Optional, string)* +** *`ids` (Optional, string[])*: A list of API key ids. +This parameter cannot be used with any of `name`, `realm_name`, or `username`. +** *`name` (Optional, string)*: An API key name. +This parameter cannot be used with any of `ids`, `realm_name` or `username`. +** *`owner` (Optional, boolean)*: Query API keys owned by the currently authenticated user. +The `realm_name` or `username` parameters cannot be specified when this parameter is set to `true` as they are assumed to be the currently authenticated ones. + +NOTE: At least one of `ids`, `name`, `username`, and `realm_name` must be specified if `owner` is `false`. +** *`realm_name` (Optional, string)*: The name of an authentication realm. +This parameter cannot be used with either `ids` or `name`, or when `owner` flag is set to `true`. +** *`username` (Optional, string)*: The username of a user. +This parameter cannot be used with either `ids` or `name` or when `owner` flag is set to `true`. + +[discrete] +==== invalidate_token +Invalidate a token. + +The access tokens returned by the get token API have a finite period of time for which they are valid. +After that time period, they can no longer be used. +The time period is defined by the `xpack.security.authc.token.timeout` setting. + +The refresh tokens returned by the get token API are only valid for 24 hours. +They can also be used exactly once. +If you want to invalidate one or more access or refresh tokens immediately, use this invalidate token API. + +NOTE: While all parameters are optional, at least one of them is required. 
+More specifically, either the `token` or the `refresh_token` parameter is required. +If neither of these two is specified, then `realm_name` and/or `username` need to be specified. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-invalidate-token[Endpoint documentation] +[source,ts] +---- +client.security.invalidateToken({ ... }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`token` (Optional, string)*: An access token. +This parameter cannot be used if any of `refresh_token`, `realm_name`, or `username` are used. +** *`refresh_token` (Optional, string)*: A refresh token. +This parameter cannot be used if any of `token`, `realm_name`, or `username` are used. +** *`realm_name` (Optional, string)*: The name of an authentication realm. +This parameter cannot be used with either `refresh_token` or `token`. +** *`username` (Optional, string)*: The username of a user. +This parameter cannot be used with either `refresh_token` or `token`. + +[discrete] +==== oidc_authenticate +Authenticate OpenID Connect. + +Exchange an OpenID Connect authentication response message for an Elasticsearch internal access token and refresh token that can be subsequently used for authentication. + +Elasticsearch exposes all the necessary OpenID Connect related functionality with the OpenID Connect APIs. +These APIs are used internally by Kibana in order to provide OpenID Connect based authentication, but can also be used by other, custom web applications or other clients. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-oidc-authenticate[Endpoint documentation] +[source,ts] +---- +client.security.oidcAuthenticate({ nonce, redirect_uri, state }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`nonce` (string)*: Associate a client session with an ID token and mitigate replay attacks. +This value needs to be the same as the one that was provided to the `/_security/oidc/prepare` API or the one that was generated by Elasticsearch and included in the response to that call. +** *`redirect_uri` (string)*: The URL to which the OpenID Connect Provider redirected the User Agent in response to an authentication request after a successful authentication. +This URL must be provided as-is (URL encoded), taken from the body of the response or as the value of a location header in the response from the OpenID Connect Provider. +** *`state` (string)*: Maintain state between the authentication request and the response. +This value needs to be the same as the one that was provided to the `/_security/oidc/prepare` API or the one that was generated by Elasticsearch and included in the response to that call. +** *`realm` (Optional, string)*: The name of the OpenID Connect realm. +This property is useful in cases where multiple realms are defined. + +[discrete] +==== oidc_logout +Logout of OpenID Connect. + +Invalidate an access token and a refresh token that were generated as a response to the `/_security/oidc/authenticate` API. + +If the OpenID Connect authentication realm in Elasticsearch is accordingly configured, the response to this call will contain a URI pointing to the end session endpoint of the OpenID Connect Provider in order to perform single logout. + +Elasticsearch exposes all the necessary OpenID Connect related functionality with the OpenID Connect APIs.
+These APIs are used internally by Kibana in order to provide OpenID Connect based authentication, but can also be used by other, custom web applications or other clients. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-oidc-logout[Endpoint documentation] +[source,ts] +---- +client.security.oidcLogout({ token }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`token` (string)*: The access token to be invalidated. +** *`refresh_token` (Optional, string)*: The refresh token to be invalidated. + +[discrete] +==== oidc_prepare_authentication +Prepare OpenID Connect authentication. + +Create an OAuth 2.0 authentication request as a URL string based on the configuration of the OpenID Connect authentication realm in Elasticsearch. + +The response of this API is a URL pointing to the Authorization Endpoint of the configured OpenID Connect Provider, which can be used to redirect the browser of the user in order to continue the authentication process. + +Elasticsearch exposes all the necessary OpenID Connect related functionality with the OpenID Connect APIs. +These APIs are used internally by Kibana in order to provide OpenID Connect based authentication, but can also be used by other, custom web applications or other clients. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-oidc-prepare-authentication[Endpoint documentation] +[source,ts] +---- +client.security.oidcPrepareAuthentication({ ... }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`iss` (Optional, string)*: In the case of a third party initiated single sign on, this is the issuer identifier for the OP that the RP is to send the authentication request to. +It cannot be specified when *realm* is specified. +One of *realm* or *iss* is required. +** *`login_hint` (Optional, string)*: In the case of a third party initiated single sign on, it is a string value that is included in the authentication request as the *login_hint* parameter. +This parameter is not valid when *realm* is specified. +** *`nonce` (Optional, string)*: The value used to associate a client session with an ID token and to mitigate replay attacks. +If the caller of the API does not provide a value, Elasticsearch will generate one with sufficient entropy and return it in the response. +** *`realm` (Optional, string)*: The name of the OpenID Connect realm in Elasticsearch whose configuration should be used in order to generate the authentication request. +It cannot be specified when *iss* is specified. +One of *realm* or *iss* is required. +** *`state` (Optional, string)*: The value used to maintain state between the authentication request and the response, typically used as a Cross-Site Request Forgery mitigation. +If the caller of the API does not provide a value, Elasticsearch will generate one with sufficient entropy and return it in the response. + +[discrete] +==== put_privileges +Create or update application privileges. + +To use this API, you must have one of the following privileges: + +* The `manage_security` cluster privilege (or a greater privilege such as `all`). +* The "Manage Application Privileges" global privilege for the application being referenced in the request. + +Application names are formed from a prefix, with an optional suffix, and must conform to the following rules: + +* The prefix must begin with a lowercase ASCII letter. +* The prefix must contain only ASCII letters or digits. +* The prefix must be at least 3 characters long.
+* If the suffix exists, it must begin with either a dash `-` or `_`. +* The suffix cannot contain any of the following characters: `\`, `/`, `*`, `?`, `"`, `<`, `>`, `|`, `,`, `*`. +* No part of the name can contain whitespace. + +Privilege names must begin with a lowercase ASCII letter and must contain only ASCII letters and digits along with the characters `_`, `-`, and `.`. + +Action names can contain any number of printable ASCII characters and must contain at least one of the following characters: `/`, `*`, `:`. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-put-privileges[Endpoint documentation] +[source,ts] +---- +client.security.putPrivileges({ ... }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`privileges` (Optional, Record>)* +** *`refresh` (Optional, Enum(true | false | "wait_for"))*: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. + +[discrete] +==== put_role +Create or update roles. + +The role management APIs are generally the preferred way to manage roles in the native realm, rather than using file-based role management. +The create or update roles API cannot update roles that are defined in roles files. +File-based role management is not available in Elastic Serverless. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-put-role[Endpoint documentation] +[source,ts] +---- +client.security.putRole({ name }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`name` (string)*: The name of the role that is being created or updated. On Elasticsearch Serverless, the role name must begin with a letter or digit and can only contain letters, digits and the characters '_', '-', and '.'. Each role must have a unique name, as this will serve as the identifier for that role. +** *`applications` (Optional, { application, privileges, resources }[])*: A list of application privilege entries. +** *`cluster` (Optional, Enum("all" | "cancel_task" | "create_snapshot" | "cross_cluster_replication" | "cross_cluster_search" | "delegate_pki" | "grant_api_key" | "manage" | "manage_api_key" | "manage_autoscaling" | "manage_behavioral_analytics" | "manage_ccr" | "manage_data_frame_transforms" | "manage_data_stream_global_retention" | "manage_enrich" | "manage_ilm" | "manage_index_templates" | "manage_inference" | "manage_ingest_pipelines" | "manage_logstash_pipelines" | "manage_ml" | "manage_oidc" | "manage_own_api_key" | "manage_pipeline" | "manage_rollup" | "manage_saml" | "manage_search_application" | "manage_search_query_rules" | "manage_search_synonyms" | "manage_security" | "manage_service_account" | "manage_slm" | "manage_token" | "manage_transform" | "manage_user_profile" | "manage_watcher" | "monitor" | "monitor_data_frame_transforms" | "monitor_data_stream_global_retention" | "monitor_enrich" | "monitor_inference" | "monitor_ml" | "monitor_rollup" | "monitor_snapshot" | "monitor_stats" | "monitor_text_structure" | "monitor_transform" | "monitor_watcher" | "none" | "post_behavioral_analytics_event" | "read_ccr" | "read_fleet_secrets" | "read_ilm" | "read_pipeline" | "read_security" | "read_slm" | "transport_client" | "write_connector_secrets" | "write_fleet_secrets")[])*: A list of cluster privileges. These privileges define the cluster-level actions for users with this role. 
+** *`global` (Optional, Record)*: An object defining global privileges. A global privilege is a form of cluster privilege that is request-aware. Support for global privileges is currently limited to the management of application privileges. +** *`indices` (Optional, { field_security, names, privileges, query, allow_restricted_indices }[])*: A list of indices permissions entries. +** *`remote_indices` (Optional, { clusters, field_security, names, privileges, query, allow_restricted_indices }[])*: A list of remote indices permissions entries. + +NOTE: Remote indices are effective for remote clusters configured with the API key based model. +They have no effect for remote clusters configured with the certificate based model. +** *`remote_cluster` (Optional, { clusters, privileges }[])*: A list of remote cluster permissions entries. +** *`metadata` (Optional, Record)*: Optional metadata. Within the metadata object, keys that begin with an underscore (`_`) are reserved for system use. +** *`run_as` (Optional, string[])*: A list of users that the owners of this role can impersonate. *Note*: in Serverless, the run-as feature is disabled. For API compatibility, you can still specify an empty `run_as` field, but a non-empty list will be rejected. +** *`description` (Optional, string)*: Optional description of the role descriptor +** *`transient_metadata` (Optional, Record)*: Indicates roles that might be incompatible with the current cluster license, specifically roles with document and field level security. When the cluster license doesn’t allow certain features for a given role, this parameter is updated dynamically to list the incompatible features. If `enabled` is `false`, the role is ignored, but is still listed in the response from the authenticate API. +** *`refresh` (Optional, Enum(true | false | "wait_for"))*: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. + +[discrete] +==== put_role_mapping +Create or update role mappings. + +Role mappings define which roles are assigned to each user. +Each mapping has rules that identify users and a list of roles that are granted to those users. +The role mapping APIs are generally the preferred way to manage role mappings rather than using role mapping files. The create or update role mappings API cannot update role mappings that are defined in role mapping files. + +NOTE: This API does not create roles. Rather, it maps users to existing roles. +Roles can be created by using the create or update roles API or roles files. + +**Role templates** + +The most common use for role mappings is to create a mapping from a known value on the user to a fixed role name. +For example, all users in the `cn=admin,dc=example,dc=com` LDAP group should be given the superuser role in Elasticsearch. +The `roles` field is used for this purpose. + +For more complex needs, it is possible to use Mustache templates to dynamically determine the names of the roles that should be granted to the user. +The `role_templates` field is used for this purpose. + +NOTE: To use role templates successfully, the relevant scripting feature must be enabled. +Otherwise, all attempts to create a role mapping with role templates fail. + +All of the user fields that are available in the role mapping rules are also available in the role templates. 
+Thus, it is possible to assign a user to a role that reflects their username, their groups, or the name of the realm to which they authenticated. + +By default, a template is evaluated to produce a single string that is the name of the role which should be assigned to the user. +If the format of the template is set to "json", then the template is expected to produce a JSON string or an array of JSON strings for the role names. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-put-role-mapping[Endpoint documentation] +[source,ts] +---- +client.security.putRoleMapping({ name }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`name` (string)*: The distinct name that identifies the role mapping. +The name is used solely as an identifier to facilitate interaction via the API; it does not affect the behavior of the mapping in any way. +** *`enabled` (Optional, boolean)*: Mappings that have `enabled` set to `false` are ignored when role mapping is performed. +** *`metadata` (Optional, Record)*: Additional metadata that helps define which roles are assigned to each user. +Within the metadata object, keys beginning with `_` are reserved for system usage. +** *`roles` (Optional, string[])*: A list of role names that are granted to the users that match the role mapping rules. +Exactly one of `roles` or `role_templates` must be specified. +** *`role_templates` (Optional, { format, template }[])*: A list of Mustache templates that will be evaluated to determine the role names that should be granted to the users that match the role mapping rules. +Exactly one of `roles` or `role_templates` must be specified. +** *`rules` (Optional, { any, all, field, except })*: The rules that determine which users should be matched by the mapping. +A rule is a logical condition that is expressed by using a JSON DSL. +** *`run_as` (Optional, string[])* +** *`refresh` (Optional, Enum(true | false | "wait_for"))*: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. + +[discrete] +==== put_user +Create or update users. + +Add and update users in the native realm. +A password is required for adding a new user but is optional when updating an existing user. +To change a user's password without updating any other fields, use the change password API. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-put-user[Endpoint documentation] +[source,ts] +---- +client.security.putUser({ username }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`username` (string)*: An identifier for the user. + +NOTE: Usernames must be at least 1 and no more than 507 characters. +They can contain alphanumeric characters (a-z, A-Z, 0-9), spaces, punctuation, and printable symbols in the Basic Latin (ASCII) block. +Leading or trailing whitespace is not allowed. +** *`email` (Optional, string | null)*: The email of the user. +** *`full_name` (Optional, string | null)*: The full name of the user. +** *`metadata` (Optional, Record)*: Arbitrary metadata that you want to associate with the user. +** *`password` (Optional, string)*: The user's password. +Passwords must be at least 6 characters long. +When adding a user, one of `password` or `password_hash` is required.
+When updating an existing user, the password is optional, so that other fields on the user (such as their roles) may be updated without modifying the user's password +** *`password_hash` (Optional, string)*: A hash of the user's password. +This must be produced using the same hashing algorithm as has been configured for password storage. +For more details, see the explanation of the `xpack.security.authc.password_hashing.algorithm` setting in the user cache and password hash algorithm documentation. +Using this parameter allows the client to pre-hash the password for performance and/or confidentiality reasons. +The `password` parameter and the `password_hash` parameter cannot be used in the same request. +** *`roles` (Optional, string[])*: A set of roles the user has. +The roles determine the user's access permissions. +To create a user without any roles, specify an empty list (`[]`). +** *`enabled` (Optional, boolean)*: Specifies whether the user is enabled. +** *`refresh` (Optional, Enum(true | false | "wait_for"))*: Valid values are `true`, `false`, and `wait_for`. +These values have the same meaning as in the index API, but the default value for this API is true. + +[discrete] +==== query_api_keys +Find API keys with a query. + +Get a paginated list of API keys and their information. +You can optionally filter the results with a query. + +To use this API, you must have at least the `manage_own_api_key` or the `read_security` cluster privileges. +If you have only the `manage_own_api_key` privilege, this API returns only the API keys that you own. +If you have the `read_security`, `manage_api_key`, or greater privileges (including `manage_security`), this API returns all API keys regardless of ownership. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-query-api-keys[Endpoint documentation] +[source,ts] +---- +client.security.queryApiKeys({ ... }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`aggregations` (Optional, Record)*: Any aggregations to run over the corpus of returned API keys. +Aggregations and queries work together. Aggregations are computed only on the API keys that match the query. +This supports only a subset of aggregation types, namely: `terms`, `range`, `date_range`, `missing`, +`cardinality`, `value_count`, `composite`, `filter`, and `filters`. +Additionally, aggregations only run over the same subset of fields that query works with. +** *`query` (Optional, { bool, exists, ids, match, match_all, prefix, range, simple_query_string, term, terms, wildcard })*: A query to filter which API keys to return. +If the query parameter is missing, it is equivalent to a `match_all` query. +The query supports a subset of query types, including `match_all`, `bool`, `term`, `terms`, `match`, +`ids`, `prefix`, `wildcard`, `exists`, `range`, and `simple_query_string`. +You can query the following public information associated with an API key: `id`, `type`, `name`, +`creation`, `expiration`, `invalidated`, `invalidation`, `username`, `realm`, and `metadata`. + +NOTE: The queryable string values associated with API keys are internally mapped as keywords. +Consequently, if no `analyzer` parameter is specified for a `match` query, then the provided match query string is interpreted as a single keyword value. +Such a match query is hence equivalent to a `term` query. +** *`from` (Optional, number)*: The starting document offset. +It must not be negative. 
+By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters. +To page through more hits, use the `search_after` parameter. +** *`sort` (Optional, string | { _score, _doc, _geo_distance, _script } | string | { _score, _doc, _geo_distance, _script }[])*: The sort definition. +Other than `id`, all public fields of an API key are eligible for sorting. +In addition, sort can also be applied to the `_doc` field to sort by index order. +** *`size` (Optional, number)*: The number of hits to return. +It must not be negative. +The `size` parameter can be set to `0`, in which case no API key matches are returned, only the aggregation results. +By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters. +To page through more hits, use the `search_after` parameter. +** *`search_after` (Optional, number | number | string | boolean | null[])*: The search after definition. +** *`with_limited_by` (Optional, boolean)*: Return the snapshot of the owner user's role descriptors associated with the API key. +An API key's actual permission is the intersection of its assigned role descriptors and the owner user's role descriptors (effectively limited by it). +An API key cannot retrieve any API key’s limited-by role descriptors (including itself) unless it has `manage_api_key` or higher privileges. +** *`with_profile_uid` (Optional, boolean)*: Determines whether to also retrieve the profile UID for the API key owner principal. +If it exists, the profile UID is returned under the `profile_uid` response field for each API key. +** *`typed_keys` (Optional, boolean)*: Determines whether aggregation names are prefixed by their respective types in the response. + +[discrete] +==== query_role +Find roles with a query. + +Get roles in a paginated manner. +The role management APIs are generally the preferred way to manage roles, rather than using file-based role management. +The query roles API does not retrieve roles that are defined in roles files, nor built-in ones. +You can optionally filter the results with a query. +Also, the results can be paginated and sorted. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-query-role[Endpoint documentation] +[source,ts] +---- +client.security.queryRole({ ... }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`query` (Optional, { bool, exists, ids, match, match_all, prefix, range, simple_query_string, term, terms, wildcard })*: A query to filter which roles to return. +If the query parameter is missing, it is equivalent to a `match_all` query. +The query supports a subset of query types, including `match_all`, `bool`, `term`, `terms`, `match`, +`ids`, `prefix`, `wildcard`, `exists`, `range`, and `simple_query_string`. +You can query the following information associated with roles: `name`, `description`, `metadata`, +`applications.application`, `applications.privileges`, and `applications.resources`. +** *`from` (Optional, number)*: The starting document offset. +It must not be negative. +By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters. +To page through more hits, use the `search_after` parameter. +** *`sort` (Optional, string | { _score, _doc, _geo_distance, _script } | string | { _score, _doc, _geo_distance, _script }[])*: The sort definition. +You can sort on `username`, `roles`, or `enabled`. +In addition, sort can also be applied to the `_doc` field to sort by index order. 
+** *`size` (Optional, number)*: The number of hits to return. +It must not be negative. +By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters. +To page through more hits, use the `search_after` parameter. +** *`search_after` (Optional, number | number | string | boolean | null[])*: The search after definition. + +[discrete] +==== query_user +Find users with a query. + +Get information for users in a paginated manner. +You can optionally filter the results with a query. + +NOTE: As opposed to the get user API, built-in users are excluded from the result. +This API is only for native users. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-query-user[Endpoint documentation] +[source,ts] +---- +client.security.queryUser({ ... }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`query` (Optional, { ids, bool, exists, match, match_all, prefix, range, simple_query_string, term, terms, wildcard })*: A query to filter which users to return. +If the query parameter is missing, it is equivalent to a `match_all` query. +The query supports a subset of query types, including `match_all`, `bool`, `term`, `terms`, `match`, +`ids`, `prefix`, `wildcard`, `exists`, `range`, and `simple_query_string`. +You can query the following information associated with user: `username`, `roles`, `enabled`, `full_name`, and `email`. +** *`from` (Optional, number)*: The starting document offset. +It must not be negative. +By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters. +To page through more hits, use the `search_after` parameter. +** *`sort` (Optional, string | { _score, _doc, _geo_distance, _script } | string | { _score, _doc, _geo_distance, _script }[])*: The sort definition. +Fields eligible for sorting are: `username`, `roles`, `enabled`. +In addition, sort can also be applied to the `_doc` field to sort by index order. +** *`size` (Optional, number)*: The number of hits to return. +It must not be negative. +By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters. +To page through more hits, use the `search_after` parameter. +** *`search_after` (Optional, number | number | string | boolean | null[])*: The search after definition +** *`with_profile_uid` (Optional, boolean)*: Determines whether to retrieve the user profile UID, if it exists, for the users. + +[discrete] +==== saml_authenticate +Authenticate SAML. + +Submit a SAML response message to Elasticsearch for consumption. + +NOTE: This API is intended for use by custom web applications other than Kibana. +If you are using Kibana, refer to the documentation for configuring SAML single-sign-on on the Elastic Stack. + +The SAML message that is submitted can be: + +* A response to a SAML authentication request that was previously created using the SAML prepare authentication API. +* An unsolicited SAML message in the case of an IdP-initiated single sign-on (SSO) flow. + +In either case, the SAML message needs to be a base64 encoded XML document with a root element of ``. + +After successful validation, Elasticsearch responds with an Elasticsearch internal access token and refresh token that can be subsequently used for authentication. +This API endpoint essentially exchanges SAML responses that indicate successful authentication in the IdP for Elasticsearch access and refresh tokens, which can be used for authentication against Elasticsearch. 
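+
+For example, a minimal sketch of this exchange (the realm name `saml1`, the SAML response payload, and the stored request ID below are placeholders, not values defined by this API):
+
+[source,ts]
+----
+// Hypothetical inputs: the Base64-encoded SAMLResponse posted by the user's browser
+// and the request ID saved from the SAML prepare authentication step.
+const samlResponse = 'PHNhbWxwOlJlc3BvbnNlLi4u'
+const requestId = '_abc123'
+
+const tokens = await client.security.samlAuthenticate({
+  content: samlResponse,
+  ids: [requestId],
+  realm: 'saml1'
+})
+// tokens.access_token and tokens.refresh_token can now be used to authenticate against Elasticsearch.
+----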
+ +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-saml-authenticate[Endpoint documentation] +[source,ts] +---- +client.security.samlAuthenticate({ content, ids }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`content` (string)*: The SAML response as it was sent by the user's browser, usually a Base64 encoded XML document. +** *`ids` (string | string[])*: A JSON array with all the valid SAML Request Ids that the caller of the API has for the current user. +** *`realm` (Optional, string)*: The name of the realm that should authenticate the SAML response. Useful in cases where many SAML realms are defined. + +[discrete] +==== saml_complete_logout +Logout of SAML completely. + +Verifies the logout response sent from the SAML IdP. + +NOTE: This API is intended for use by custom web applications other than Kibana. +If you are using Kibana, refer to the documentation for configuring SAML single-sign-on on the Elastic Stack. + +The SAML IdP may send a logout response back to the SP after handling the SP-initiated SAML Single Logout. +This API verifies the response by ensuring the content is relevant and validating its signature. +An empty response is returned if the verification process is successful. +The response can be sent by the IdP with either the HTTP-Redirect or the HTTP-Post binding. +The caller of this API must prepare the request accordingly so that this API can handle either of them. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-saml-complete-logout[Endpoint documentation] +[source,ts] +---- +client.security.samlCompleteLogout({ realm, ids }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`realm` (string)*: The name of the SAML realm in Elasticsearch for which the configuration is used to verify the logout response. +** *`ids` (string | string[])*: A JSON array with all the valid SAML Request Ids that the caller of the API has for the current user. +** *`query_string` (Optional, string)*: If the SAML IdP sends the logout response with the HTTP-Redirect binding, this field must be set to the query string of the redirect URI. +** *`content` (Optional, string)*: If the SAML IdP sends the logout response with the HTTP-Post binding, this field must be set to the value of the SAMLResponse form parameter from the logout response. + +[discrete] +==== saml_invalidate +Invalidate SAML. + +Submit a SAML LogoutRequest message to Elasticsearch for consumption. + +NOTE: This API is intended for use by custom web applications other than Kibana. +If you are using Kibana, refer to the documentation for configuring SAML single-sign-on on the Elastic Stack. + +The logout request comes from the SAML IdP during an IdP initiated Single Logout. +The custom web application can use this API to have Elasticsearch process the `LogoutRequest`. +After successful validation of the request, Elasticsearch invalidates the access token and refresh token that corresponds to that specific SAML principal and provides a URL that contains a SAML LogoutResponse message. +Thus the user can be redirected back to their IdP. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-saml-invalidate[Endpoint documentation] +[source,ts] +---- +client.security.samlInvalidate({ query_string }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`query_string` (string)*: The query part of the URL that the user was redirected to by the SAML IdP to initiate the Single Logout. 
+This query should include a single parameter named `SAMLRequest` that contains a SAML logout request that is deflated and Base64 encoded.
+If the SAML IdP has signed the logout request, the URL should include two extra parameters named `SigAlg` and `Signature` that contain the algorithm used for the signature and the signature value itself.
+In order for Elasticsearch to be able to verify the IdP's signature, the value of the `query_string` field must be an exact match to the string provided by the browser.
+The client application must not attempt to parse or process the string in any way.
+** *`acs` (Optional, string)*: The Assertion Consumer Service URL that matches the one of the SAML realm in Elasticsearch that should be used. You must specify either this parameter or the `realm` parameter.
+** *`realm` (Optional, string)*: The name of the SAML realm in Elasticsearch for which the configuration is used. You must specify either this parameter or the `acs` parameter.
+
+[discrete]
+==== saml_logout
+Logout of SAML.
+
+Submits a request to invalidate an access token and refresh token.
+
+NOTE: This API is intended for use by custom web applications other than Kibana.
+If you are using Kibana, refer to the documentation for configuring SAML single-sign-on on the Elastic Stack.
+
+This API invalidates the tokens that were generated for a user by the SAML authenticate API.
+If the SAML realm in Elasticsearch is configured accordingly and the SAML IdP supports this, the Elasticsearch response contains a URL to redirect the user to the IdP that contains a SAML logout request (starting an SP-initiated SAML Single Logout).
+
+https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-saml-logout[Endpoint documentation]
+[source,ts]
+----
+client.security.samlLogout({ token })
+----
+
+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`token` (string)*: The access token that was returned as a response to calling the SAML authenticate API.
+Alternatively, the most recent token that was received after refreshing the original one by using a `refresh_token`.
+** *`refresh_token` (Optional, string)*: The refresh token that was returned as a response to calling the SAML authenticate API.
+Alternatively, the most recent refresh token that was received after refreshing the original access token.
+
+[discrete]
+==== saml_prepare_authentication
+Prepare SAML authentication.
+
+Create a SAML authentication request (``) as a URL string based on the configuration of the respective SAML realm in Elasticsearch.
+
+NOTE: This API is intended for use by custom web applications other than Kibana.
+If you are using Kibana, refer to the documentation for configuring SAML single-sign-on on the Elastic Stack.
+
+This API returns a URL pointing to the SAML Identity Provider.
+You can use the URL to redirect the browser of the user in order to continue the authentication process.
+The URL includes a single parameter named `SAMLRequest`, which contains a SAML Authentication request that is deflated and Base64 encoded.
+If the configuration dictates that SAML authentication requests should be signed, the URL has two extra parameters named `SigAlg` and `Signature`.
+These parameters contain the algorithm used for the signature and the signature value itself.
+It also returns a random string that uniquely identifies this SAML Authentication request.
+The caller of this API needs to store this identifier as it needs to be used in a following step of the authentication process.
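+
+As an illustration, a minimal sketch of this step (the realm name `saml1` is a placeholder):
+
+[source,ts]
+----
+const { id, realm, redirect } = await client.security.samlPrepareAuthentication({
+  realm: 'saml1'
+})
+// Redirect the user's browser to `redirect` and store `id`;
+// the stored ID is later passed in the `ids` array of the SAML authenticate API.
+----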
+
+https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-saml-prepare-authentication[Endpoint documentation]
+[source,ts]
+----
+client.security.samlPrepareAuthentication({ ... })
+----
+
+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`acs` (Optional, string)*: The Assertion Consumer Service URL that matches the one of the SAML realms in Elasticsearch.
+The realm is used to generate the authentication request. You must specify either this parameter or the `realm` parameter.
+** *`realm` (Optional, string)*: The name of the SAML realm in Elasticsearch for which the configuration is used to generate the authentication request.
+You must specify either this parameter or the `acs` parameter.
+** *`relay_state` (Optional, string)*: A string that will be included in the redirect URL that this API returns as the `RelayState` query parameter.
+If the Authentication Request is signed, this value is used as part of the signature computation.
+
+[discrete]
+==== saml_service_provider_metadata
+Create SAML service provider metadata.
+
+Generate SAML metadata for a SAML 2.0 Service Provider.
+
+The SAML 2.0 specification provides a mechanism for Service Providers to describe their capabilities and configuration using a metadata file.
+This API generates Service Provider metadata based on the configuration of a SAML realm in Elasticsearch.
+
+https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-saml-service-provider-metadata[Endpoint documentation]
+[source,ts]
+----
+client.security.samlServiceProviderMetadata({ realm_name })
+----
+
+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`realm_name` (string)*: The name of the SAML realm in Elasticsearch.
+
+[discrete]
+==== suggest_user_profiles
+Suggest a user profile.
+
+Get suggestions for user profiles that match specified search criteria.
+
+NOTE: The user profile feature is designed only for use by Kibana and Elastic's Observability, Enterprise Search, and Elastic Security solutions.
+Individual users and external applications should not call this API directly.
+Elastic reserves the right to change or remove this feature in future releases without prior notice.
+
+https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-suggest-user-profiles[Endpoint documentation]
+[source,ts]
+----
+client.security.suggestUserProfiles({ ... })
+----
+
+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`name` (Optional, string)*: A query string used to match name-related fields in user profile documents.
+Name-related fields are the user's `username`, `full_name`, and `email`.
+** *`size` (Optional, number)*: The number of profiles to return.
+** *`data` (Optional, string | string[])*: A list of filters for the `data` field of the profile document.
+To return all content use `data=*`.
+To return a subset of content, use `data=` to retrieve content nested under the specified ``.
+By default, the API returns no `data` content.
+It is an error to specify `data` as both the query parameter and the request body field.
+** *`hint` (Optional, { uids, labels })*: Extra search criteria to improve relevance of the suggestion result.
+Profiles matching the specified hint are ranked higher in the response.
+Profiles not matching the hint aren't excluded from the response as long as the profile matches the `name` field query.
+
+[discrete]
+==== update_api_key
+Update an API key.
+
+Update attributes of an existing API key.
+This API supports updates to an API key's access scope, expiration, and metadata. + +To use this API, you must have at least the `manage_own_api_key` cluster privilege. +Users can only update API keys that they created or that were granted to them. +To update another user’s API key, use the `run_as` feature to submit a request on behalf of another user. + +IMPORTANT: It's not possible to use an API key as the authentication credential for this API. The owner user’s credentials are required. + +Use this API to update API keys created by the create API key or grant API Key APIs. +If you need to apply the same update to many API keys, you can use the bulk update API keys API to reduce overhead. +It's not possible to update expired API keys or API keys that have been invalidated by the invalidate API key API. + +The access scope of an API key is derived from the `role_descriptors` you specify in the request and a snapshot of the owner user's permissions at the time of the request. +The snapshot of the owner's permissions is updated automatically on every call. + +IMPORTANT: If you don't specify `role_descriptors` in the request, a call to this API might still change the API key's access scope. +This change can occur if the owner user's permissions have changed since the API key was created or last modified. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-update-api-key[Endpoint documentation] +[source,ts] +---- +client.security.updateApiKey({ id }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`id` (string)*: The ID of the API key to update. +** *`role_descriptors` (Optional, Record)*: The role descriptors to assign to this API key. +The API key's effective permissions are an intersection of its assigned privileges and the point in time snapshot of permissions of the owner user. +You can assign new privileges by specifying them in this parameter. +To remove assigned privileges, you can supply an empty `role_descriptors` parameter, that is to say, an empty object `{}`. +If an API key has no assigned privileges, it inherits the owner user's full permissions. +The snapshot of the owner's permissions is always updated, whether you supply the `role_descriptors` parameter or not. +The structure of a role descriptor is the same as the request for the create API keys API. +** *`metadata` (Optional, Record)*: Arbitrary metadata that you want to associate with the API key. +It supports a nested data structure. +Within the metadata object, keys beginning with `_` are reserved for system usage. +When specified, this value fully replaces the metadata previously associated with the API key. +** *`expiration` (Optional, string | -1 | 0)*: The expiration time for the API key. +By default, API keys never expire. +This property can be omitted to leave the expiration unchanged. + +[discrete] +==== update_cross_cluster_api_key +Update a cross-cluster API key. + +Update the attributes of an existing cross-cluster API key, which is used for API key based remote cluster access. + +To use this API, you must have at least the `manage_security` cluster privilege. +Users can only update API keys that they created. +To update another user's API key, use the `run_as` feature to submit a request on behalf of another user. + +IMPORTANT: It's not possible to use an API key as the authentication credential for this API. +To update an API key, the owner user's credentials are required. 
+ +It's not possible to update expired API keys, or API keys that have been invalidated by the invalidate API key API. + +This API supports updates to an API key's access scope, metadata, and expiration. +The owner user's information, such as the `username` and `realm`, is also updated automatically on every call. + +NOTE: This API cannot update REST API keys, which should be updated by either the update API key or bulk update API keys API. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-update-cross-cluster-api-key[Endpoint documentation] +[source,ts] +---- +client.security.updateCrossClusterApiKey({ id, access }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`id` (string)*: The ID of the cross-cluster API key to update. +** *`access` ({ replication, search })*: The access to be granted to this API key. +The access is composed of permissions for cross cluster search and cross cluster replication. +At least one of them must be specified. +When specified, the new access assignment fully replaces the previously assigned access. +** *`expiration` (Optional, string | -1 | 0)*: The expiration time for the API key. +By default, API keys never expire. This property can be omitted to leave the value unchanged. +** *`metadata` (Optional, Record)*: Arbitrary metadata that you want to associate with the API key. +It supports nested data structure. +Within the metadata object, keys beginning with `_` are reserved for system usage. +When specified, this information fully replaces metadata previously associated with the API key. + +[discrete] +==== update_settings +Update security index settings. + +Update the user-configurable settings for the security internal index (`.security` and associated indices). Only a subset of settings are allowed to be modified. This includes `index.auto_expand_replicas` and `index.number_of_replicas`. + +NOTE: If `index.auto_expand_replicas` is set, `index.number_of_replicas` will be ignored during updates. + +If a specific index is not in use on the system and settings are provided for it, the request will be rejected. +This API does not yet support configuring the settings for indices before they are in use. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-update-settings[Endpoint documentation] +[source,ts] +---- +client.security.updateSettings({ ... }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`security` (Optional, { index })*: Settings for the index used for most security configuration, including native realm users and roles configured with the API. +** *`security-profile` (Optional, { index })*: Settings for the index used to store profile information. +** *`security-tokens` (Optional, { index })*: Settings for the index used to store tokens. +** *`master_timeout` (Optional, string | -1 | 0)*: The period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. +** *`timeout` (Optional, string | -1 | 0)*: The period to wait for a response. +If no response is received before the timeout expires, the request fails and returns an error. + +[discrete] +==== update_user_profile_data +Update user profile data. + +Update specific data for the user profile that is associated with a unique ID. + +NOTE: The user profile feature is designed only for use by Kibana and Elastic's Observability, Enterprise Search, and Elastic Security solutions. 
+Individual users and external applications should not call this API directly. +Elastic reserves the right to change or remove this feature in future releases without prior notice. + +To use this API, you must have one of the following privileges: + +* The `manage_user_profile` cluster privilege. +* The `update_profile_data` global privilege for the namespaces that are referenced in the request. + +This API updates the `labels` and `data` fields of an existing user profile document with JSON objects. +New keys and their values are added to the profile document and conflicting keys are replaced by data that's included in the request. + +For both labels and data, content is namespaced by the top-level fields. +The `update_profile_data` global privilege grants privileges for updating only the allowed namespaces. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-update-user-profile-data[Endpoint documentation] +[source,ts] +---- +client.security.updateUserProfileData({ uid }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`uid` (string)*: A unique identifier for the user profile. +** *`labels` (Optional, Record)*: Searchable data that you want to associate with the user profile. +This field supports a nested data structure. +Within the labels object, top-level keys cannot begin with an underscore (`_`) or contain a period (`.`). +** *`data` (Optional, Record)*: Non-searchable data that you want to associate with the user profile. +This field supports a nested data structure. +Within the `data` object, top-level keys cannot begin with an underscore (`_`) or contain a period (`.`). +The data object is not searchable, but can be retrieved with the get user profile API. +** *`if_seq_no` (Optional, number)*: Only perform the operation if the document has this sequence number. +** *`if_primary_term` (Optional, number)*: Only perform the operation if the document has this primary term. +** *`refresh` (Optional, Enum(true | false | "wait_for"))*: If 'true', Elasticsearch refreshes the affected shards to make this operation +visible to search. +If 'wait_for', it waits for a refresh to make this operation visible to search. +If 'false', nothing is done with refreshes. + +[discrete] +=== shutdown +[discrete] +==== delete_node +Cancel node shutdown preparations. +Remove a node from the shutdown list so it can resume normal operations. +You must explicitly clear the shutdown request when a node rejoins the cluster or when a node has permanently left the cluster. +Shutdown requests are never removed automatically by Elasticsearch. + +NOTE: This feature is designed for indirect use by Elastic Cloud, Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. +Direct use is not supported. + +If the operator privileges feature is enabled, you must be an operator to use this API. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-shutdown-delete-node[Endpoint documentation] +[source,ts] +---- +client.shutdown.deleteNode({ node_id }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`node_id` (string)*: The node id of node to be removed from the shutdown state +** *`master_timeout` (Optional, Enum("nanos" | "micros" | "ms" | "s" | "m" | "h" | "d"))*: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. +** *`timeout` (Optional, Enum("nanos" | "micros" | "ms" | "s" | "m" | "h" | "d"))*: Period to wait for a response. 
If no response is received before the timeout expires, the request fails and returns an error.
+
+[discrete]
+==== get_node
+Get the shutdown status.
+
+Get information about nodes that are ready to be shut down, have shut down preparations still in progress, or have stalled.
+The API returns status information for each part of the shut down process.
+
+NOTE: This feature is designed for indirect use by Elasticsearch Service, Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not supported.
+
+If the operator privileges feature is enabled, you must be an operator to use this API.
+
+https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-shutdown-get-node[Endpoint documentation]
+[source,ts]
+----
+client.shutdown.getNode({ ... })
+----
+
+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`node_id` (Optional, string | string[])*: The node or nodes for which to retrieve the shutdown status.
+** *`master_timeout` (Optional, Enum("nanos" | "micros" | "ms" | "s" | "m" | "h" | "d"))*: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.
+
+[discrete]
+==== put_node
+Prepare a node to be shut down.
+
+NOTE: This feature is designed for indirect use by Elastic Cloud, Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not supported.
+
+If you specify a node that is offline, it will be prepared for shut down when it rejoins the cluster.
+
+If the operator privileges feature is enabled, you must be an operator to use this API.
+
+The API migrates ongoing tasks and index shards to other nodes as needed to prepare a node to be restarted or shut down and removed from the cluster.
+This ensures that Elasticsearch can be stopped safely with minimal disruption to the cluster.
+
+You must specify the type of shutdown: `restart`, `remove`, or `replace`.
+If a node is already being prepared for shutdown, you can use this API to change the shutdown type.
+
+IMPORTANT: This API does NOT terminate the Elasticsearch process.
+Monitor the node shutdown status to determine when it is safe to stop Elasticsearch.
+
+https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-shutdown-put-node[Endpoint documentation]
+[source,ts]
+----
+client.shutdown.putNode({ node_id, type, reason })
+----
+
+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`node_id` (string)*: The node identifier.
+This parameter is not validated against the cluster's active nodes.
+This enables you to register a node for shut down while it is offline.
+No error is thrown if you specify an invalid node ID.
+** *`type` (Enum("restart" | "remove" | "replace"))*: Valid values are restart, remove, or replace.
+Use restart when you need to temporarily shut down a node to perform an upgrade, make configuration changes, or perform other maintenance.
+Because the node is expected to rejoin the cluster, data is not migrated off of the node.
+Use remove when you need to permanently remove a node from the cluster.
+The node is not marked ready for shutdown until data is migrated off of the node. Use replace to do a 1:1 replacement of a node with another node.
+Certain allocation decisions will be ignored (such as disk watermarks) in the interest of true replacement of the source node with the target node.
+During a replace-type shutdown, rollover and index creation may result in unassigned shards, and shrink may fail until the replacement is complete.
+** *`reason` (string)*: A human-readable reason that the node is being shut down. +This field provides information for other cluster operators; it does not affect the shut down process. +** *`allocation_delay` (Optional, string)*: Only valid if type is restart. +Controls how long Elasticsearch will wait for the node to restart and join the cluster before reassigning its shards to other nodes. +This works the same as delaying allocation with the index.unassigned.node_left.delayed_timeout setting. +If you specify both a restart allocation delay and an index-level allocation delay, the longer of the two is used. +** *`target_node_name` (Optional, string)*: Only valid if type is replace. +Specifies the name of the node that is replacing the node being shut down. +Shards from the shut down node are only allowed to be allocated to the target node, and no other data will be allocated to the target node. +During relocation of data certain allocation rules are ignored, such as disk watermarks or user attribute filtering rules. +** *`master_timeout` (Optional, Enum("nanos" | "micros" | "ms" | "s" | "m" | "h" | "d"))*: The period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. +** *`timeout` (Optional, Enum("nanos" | "micros" | "ms" | "s" | "m" | "h" | "d"))*: The period to wait for a response. +If no response is received before the timeout expires, the request fails and returns an error. + +[discrete] +=== simulate +[discrete] +==== ingest +Simulate data ingestion. +Run ingest pipelines against a set of provided documents, optionally with substitute pipeline definitions, to simulate ingesting data into an index. + +This API is meant to be used for troubleshooting or pipeline development, as it does not actually index any data into Elasticsearch. + +The API runs the default and final pipeline for that index against a set of documents provided in the body of the request. +If a pipeline contains a reroute processor, it follows that reroute processor to the new index, running that index's pipelines as well the same way that a non-simulated ingest would. +No data is indexed into Elasticsearch. +Instead, the transformed document is returned, along with the list of pipelines that have been run and the name of the index where the document would have been indexed if this were not a simulation. +The transformed document is validated against the mappings that would apply to this index, and any validation error is reported in the result. + +This API differs from the simulate pipeline API in that you specify a single pipeline for that API, and it runs only that one pipeline. +The simulate pipeline API is more useful for developing a single pipeline, while the simulate ingest API is more useful for troubleshooting the interaction of the various pipelines that get applied when ingesting into an index. + +By default, the pipeline definitions that are currently in the system are used. +However, you can supply substitute pipeline definitions in the body of the request. +These will be used in place of the pipeline definitions that are already in the system. This can be used to replace existing pipeline definitions or to create new ones. The pipeline substitutions are used only within this request. 
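+
+For example, a minimal sketch of a request that substitutes one pipeline definition (the index name `my-index` and pipeline name `my-pipeline` are placeholders):
+
+[source,ts]
+----
+const result = await client.simulate.ingest({
+  docs: [
+    { _index: 'my-index', _id: '1', _source: { message: 'hello world' } }
+  ],
+  // Used in place of the stored `my-pipeline` definition for this request only.
+  pipeline_substitutions: {
+    'my-pipeline': {
+      processors: [
+        { set: { field: 'processed', value: true } }
+      ]
+    }
+  }
+})
+// Each entry in result.docs contains the transformed document and the pipelines that ran.
+console.log(result.docs)
+----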
+ +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-simulate-ingest[Endpoint documentation] +[source,ts] +---- +client.simulate.ingest({ docs }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`docs` ({ _id, _index, _source }[])*: Sample documents to test in the pipeline. +** *`index` (Optional, string)*: The index to simulate ingesting into. +This value can be overridden by specifying an index on each document. +If you specify this parameter in the request path, it is used for any documents that do not explicitly specify an index argument. +** *`component_template_substitutions` (Optional, Record)*: A map of component template names to substitute component template definition objects. +** *`index_template_substitutions` (Optional, Record)*: A map of index template names to substitute index template definition objects. +** *`mapping_addition` (Optional, { all_field, date_detection, dynamic, dynamic_date_formats, dynamic_templates, _field_names, index_field, _meta, numeric_detection, properties, _routing, _size, _source, runtime, enabled, subobjects, _data_stream_timestamp })* +** *`pipeline_substitutions` (Optional, Record)*: Pipelines to test. +If you don’t specify the `pipeline` request path parameter, this parameter is required. +If you specify both this and the request path parameter, the API only uses the request path parameter. +** *`pipeline` (Optional, string)*: The pipeline to use as the default pipeline. +This value can be used to override the default pipeline of the index. + +[discrete] +=== slm +[discrete] +==== delete_lifecycle +Delete a policy. +Delete a snapshot lifecycle policy definition. +This operation prevents any future snapshots from being taken but does not cancel in-progress snapshots or remove previously-taken snapshots. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-slm-delete-lifecycle[Endpoint documentation] +[source,ts] +---- +client.slm.deleteLifecycle({ policy_id }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`policy_id` (string)*: The id of the snapshot lifecycle policy to remove +** *`master_timeout` (Optional, string | -1 | 0)*: The period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. +** *`timeout` (Optional, string | -1 | 0)*: The period to wait for a response. +If no response is received before the timeout expires, the request fails and returns an error. + +[discrete] +==== execute_lifecycle +Run a policy. +Immediately create a snapshot according to the snapshot lifecycle policy without waiting for the scheduled time. +The snapshot policy is normally applied according to its schedule, but you might want to manually run a policy before performing an upgrade or other maintenance. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-slm-execute-lifecycle[Endpoint documentation] +[source,ts] +---- +client.slm.executeLifecycle({ policy_id }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`policy_id` (string)*: The id of the snapshot lifecycle policy to be executed +** *`master_timeout` (Optional, string | -1 | 0)*: The period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. +** *`timeout` (Optional, string | -1 | 0)*: The period to wait for a response. +If no response is received before the timeout expires, the request fails and returns an error. 
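+
+For example, a minimal sketch of running a policy immediately (the policy ID `nightly-snapshots` is a placeholder):
+
+[source,ts]
+----
+const response = await client.slm.executeLifecycle({ policy_id: 'nightly-snapshots' })
+// The API returns the name of the snapshot that was started.
+console.log(response.snapshot_name)
+----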
+ +[discrete] +==== execute_retention +Run a retention policy. +Manually apply the retention policy to force immediate removal of snapshots that are expired according to the snapshot lifecycle policy retention rules. +The retention policy is normally applied according to its schedule. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-slm-execute-retention[Endpoint documentation] +[source,ts] +---- +client.slm.executeRetention({ ... }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`master_timeout` (Optional, string | -1 | 0)*: The period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. +** *`timeout` (Optional, string | -1 | 0)*: The period to wait for a response. +If no response is received before the timeout expires, the request fails and returns an error. + +[discrete] +==== get_lifecycle +Get policy information. +Get snapshot lifecycle policy definitions and information about the latest snapshot attempts. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-slm-get-lifecycle[Endpoint documentation] +[source,ts] +---- +client.slm.getLifecycle({ ... }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`policy_id` (Optional, string | string[])*: List of snapshot lifecycle policies to retrieve +** *`master_timeout` (Optional, string | -1 | 0)*: The period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. +** *`timeout` (Optional, string | -1 | 0)*: The period to wait for a response. +If no response is received before the timeout expires, the request fails and returns an error. + +[discrete] +==== get_stats +Get snapshot lifecycle management statistics. +Get global and policy-level statistics about actions taken by snapshot lifecycle management. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-slm-get-stats[Endpoint documentation] +[source,ts] +---- +client.slm.getStats({ ... }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. +** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. + +[discrete] +==== get_status +Get the snapshot lifecycle management status. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-slm-get-status[Endpoint documentation] +[source,ts] +---- +client.slm.getStatus({ ... }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`master_timeout` (Optional, string | -1 | 0)*: The period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. +To indicate that the request should never timeout, set it to `-1`. +** *`timeout` (Optional, string | -1 | 0)*: The period to wait for a response. +If no response is received before the timeout expires, the request fails and returns an error. +To indicate that the request should never timeout, set it to `-1`. + +[discrete] +==== put_lifecycle +Create or update a policy. +Create or update a snapshot lifecycle policy. +If the policy already exists, this request increments the policy version. 
+Only the latest version of a policy is stored. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-slm-put-lifecycle[Endpoint documentation] +[source,ts] +---- +client.slm.putLifecycle({ policy_id }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`policy_id` (string)*: The identifier for the snapshot lifecycle policy you want to create or update. +** *`config` (Optional, { ignore_unavailable, indices, include_global_state, feature_states, metadata, partial })*: Configuration for each snapshot created by the policy. +** *`name` (Optional, string)*: Name automatically assigned to each snapshot created by the policy. Date math is supported. To prevent conflicting snapshot names, a UUID is automatically appended to each snapshot name. +** *`repository` (Optional, string)*: Repository used to store snapshots created by this policy. This repository must exist prior to the policy’s creation. You can create a repository using the snapshot repository API. +** *`retention` (Optional, { expire_after, max_count, min_count })*: Retention rules used to retain and delete snapshots created by the policy. +** *`schedule` (Optional, string)*: Periodic or absolute schedule at which the policy creates snapshots. SLM applies schedule changes immediately. +** *`master_timeout` (Optional, string | -1 | 0)*: The period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. +To indicate that the request should never timeout, set it to `-1`. +** *`timeout` (Optional, string | -1 | 0)*: The period to wait for a response. +If no response is received before the timeout expires, the request fails and returns an error. +To indicate that the request should never timeout, set it to `-1`. + +[discrete] +==== start +Start snapshot lifecycle management. +Snapshot lifecycle management (SLM) starts automatically when a cluster is formed. +Manually starting SLM is necessary only if it has been stopped using the stop SLM API. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-slm-start[Endpoint documentation] +[source,ts] +---- +client.slm.start({ ... }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`master_timeout` (Optional, string | -1 | 0)*: The period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. +To indicate that the request should never timeout, set it to `-1`. +** *`timeout` (Optional, string | -1 | 0)*: The period to wait for a response. +If no response is received before the timeout expires, the request fails and returns an error. +To indicate that the request should never timeout, set it to `-1`. + +[discrete] +==== stop +Stop snapshot lifecycle management. +Stop all snapshot lifecycle management (SLM) operations and the SLM plugin. +This API is useful when you are performing maintenance on a cluster and need to prevent SLM from performing any actions on your data streams or indices. +Stopping SLM does not stop any snapshots that are in progress. +You can manually trigger snapshots with the run snapshot lifecycle policy API even if SLM is stopped. + +The API returns a response as soon as the request is acknowledged, but the plugin might continue to run until in-progress operations complete and it can be safely stopped. +Use the get snapshot lifecycle management status API to see if SLM is running. 
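+
+A minimal sketch of stopping SLM and then confirming it has stopped via the get status API documented above:
+
+[source,ts]
+----
+await client.slm.stop()
+// Poll the status until the plugin reports STOPPED rather than STOPPING.
+const { operation_mode } = await client.slm.getStatus()
+console.log(operation_mode) // 'RUNNING' | 'STOPPING' | 'STOPPED'
+----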
+
+https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-slm-stop[Endpoint documentation]
+[source,ts]
+----
+client.slm.stop({ ... })
+----
+
+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`master_timeout` (Optional, string | -1 | 0)*: The period to wait for a connection to the master node.
+If no response is received before the timeout expires, the request fails and returns an error.
+To indicate that the request should never timeout, set it to `-1`.
+** *`timeout` (Optional, string | -1 | 0)*: The period to wait for a response.
+If no response is received before the timeout expires, the request fails and returns an error.
+To indicate that the request should never timeout, set it to `-1`.
+
+[discrete]
+=== snapshot
+[discrete]
+==== cleanup_repository
+Clean up the snapshot repository.
+Trigger the review of the contents of a snapshot repository and delete any stale data not referenced by existing snapshots.
+
+https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-cleanup-repository[Endpoint documentation]
+[source,ts]
+----
+client.snapshot.cleanupRepository({ repository })
+----
+
+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`repository` (string)*: The name of the snapshot repository to clean up.
+** *`master_timeout` (Optional, string | -1 | 0)*: The period to wait for a connection to the master node.
+If the master node is not available before the timeout expires, the request fails and returns an error.
+To indicate that the request should never timeout, set it to `-1`.
+** *`timeout` (Optional, string | -1 | 0)*: The period to wait for a response from all relevant nodes in the cluster after updating the cluster metadata.
+If no response is received before the timeout expires, the cluster metadata update still applies but the response will indicate that it was not completely acknowledged.
+To indicate that the request should never timeout, set it to `-1`.
+
+[discrete]
+==== clone
+Clone a snapshot.
+Clone part or all of a snapshot into another snapshot in the same repository.
+
+https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-clone[Endpoint documentation]
+[source,ts]
+----
+client.snapshot.clone({ repository, snapshot, target_snapshot, indices })
+----
+
+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`repository` (string)*: The name of the snapshot repository that both source and target snapshot belong to.
+** *`snapshot` (string)*: The source snapshot name.
+** *`target_snapshot` (string)*: The target snapshot name.
+** *`indices` (string)*: A list of indices to include in the snapshot.
+Multi-target syntax is supported.
+** *`master_timeout` (Optional, string | -1 | 0)*: The period to wait for the master node.
+If the master node is not available before the timeout expires, the request fails and returns an error.
+To indicate that the request should never timeout, set it to `-1`.
+** *`timeout` (Optional, string | -1 | 0)*: The period of time to wait for a response.
+If no response is received before the timeout expires, the request fails and returns an error.
+
+[discrete]
+==== create
+Create a snapshot.
+Take a snapshot of a cluster or of data streams and indices.
+
+https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-create[Endpoint documentation]
+[source,ts]
+----
+client.snapshot.create({ repository, snapshot })
+----
+
+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`repository` (string)*: The name of the repository for the snapshot.
+** *`snapshot` (string)*: The name of the snapshot.
+It supports date math.
+It must be unique in the repository.
+** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Determines how wildcard patterns in the `indices` parameter match data streams and indices.
+It supports a list of values such as `open,hidden`.
+** *`feature_states` (Optional, string[])*: The feature states to include in the snapshot.
+Each feature state includes one or more system indices containing related data.
+You can view a list of eligible features using the get features API.
+
+If `include_global_state` is `true`, all current feature states are included by default.
+If `include_global_state` is `false`, no feature states are included by default.
+
+Note that specifying an empty array will result in the default behavior.
+To exclude all feature states, regardless of the `include_global_state` value, specify an array with only the value `none` (`["none"]`).
+** *`ignore_unavailable` (Optional, boolean)*: If `true`, the request ignores data streams and indices in `indices` that are missing or closed.
+If `false`, the request returns an error for any data stream or index that is missing or closed.
+** *`include_global_state` (Optional, boolean)*: If `true`, the current cluster state is included in the snapshot.
+The cluster state includes persistent cluster settings, composable index templates, legacy index templates, ingest pipelines, and ILM policies.
+It also includes data stored in system indices, such as Watches and task records (configurable via `feature_states`).
+** *`indices` (Optional, string | string[])*: A list of data streams and indices to include in the snapshot.
+It supports a multi-target syntax.
+The default is an empty array (`[]`), which includes all regular data streams and regular indices.
+To exclude all data streams and indices, use `-*`.
+
+You can't use this parameter to include or exclude system indices or system data streams from a snapshot.
+Use `feature_states` instead.
+** *`metadata` (Optional, Record)*: Arbitrary metadata to attach to the snapshot, such as a record of who took the snapshot, why it was taken, or any other useful data.
+It can have any contents but it must be less than 1024 bytes.
+This information is not automatically generated by Elasticsearch.
+** *`partial` (Optional, boolean)*: If `true`, it enables you to restore a partial snapshot of indices with unavailable shards.
+Only shards that were successfully included in the snapshot will be restored.
+All missing shards will be recreated as empty.
+
+If `false`, the entire restore operation will fail if one or more indices included in the snapshot do not have all primary shards available.
+** *`master_timeout` (Optional, string | -1 | 0)*: The period to wait for a connection to the master node.
+If no response is received before the timeout expires, the request fails and returns an error.
+** *`wait_for_completion` (Optional, boolean)*: If `true`, the request returns a response when the snapshot is complete.
+If `false`, the request returns a response when the snapshot initializes.
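+
+For example, a minimal sketch of taking a snapshot and waiting for it to finish (the repository `my_repository`, snapshot name, and index pattern are placeholders):
+
+[source,ts]
+----
+const response = await client.snapshot.create({
+  repository: 'my_repository',
+  snapshot: 'snapshot_1',
+  indices: 'my-index-*',
+  ignore_unavailable: true,
+  include_global_state: false,
+  wait_for_completion: true
+})
+// With wait_for_completion set to true, the response describes the completed snapshot.
+console.log(response.snapshot)
+----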
+
+[discrete]
+==== create_repository
+Create or update a snapshot repository.
+IMPORTANT: If you are migrating searchable snapshots, the repository name must be identical in the source and destination clusters.
+To register a snapshot repository, the cluster's global metadata must be writeable.
+Ensure there are no cluster blocks (for example, `cluster.blocks.read_only` and `cluster.blocks.read_only_allow_delete` settings) that prevent write access.
+
+Several options for this API can be specified using a query parameter or a request body parameter.
+If both parameters are specified, only the query parameter is used.
+
+https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-create-repository[Endpoint documentation]
+[source,ts]
+----
+client.snapshot.createRepository({ repository })
+----
+
+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`repository` (string)*: The name of the snapshot repository to register or update.
+** *`master_timeout` (Optional, string | -1 | 0)*: The period to wait for the master node.
+If the master node is not available before the timeout expires, the request fails and returns an error.
+To indicate that the request should never timeout, set it to `-1`.
+** *`timeout` (Optional, string | -1 | 0)*: The period to wait for a response from all relevant nodes in the cluster after updating the cluster metadata.
+If no response is received before the timeout expires, the cluster metadata update still applies but the response will indicate that it was not completely acknowledged.
+To indicate that the request should never timeout, set it to `-1`.
+** *`verify` (Optional, boolean)*: If `true`, the request verifies the repository is functional on all master and data nodes in the cluster.
+If `false`, this verification is skipped.
+You can also perform this verification with the verify snapshot repository API.
+
+[discrete]
+==== delete
+Delete snapshots.
+
+https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-delete[Endpoint documentation]
+[source,ts]
+----
+client.snapshot.delete({ repository, snapshot })
+----
+
+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`repository` (string)*: The name of the repository to delete a snapshot from.
+** *`snapshot` (string)*: A list of snapshot names to delete.
+It also accepts wildcards (`*`).
+** *`master_timeout` (Optional, string | -1 | 0)*: The period to wait for the master node.
+If the master node is not available before the timeout expires, the request fails and returns an error.
+To indicate that the request should never timeout, set it to `-1`.
+
+[discrete]
+==== delete_repository
+Delete snapshot repositories.
+When a repository is unregistered, Elasticsearch removes only the reference to the location where the repository is storing the snapshots.
+The snapshots themselves are left untouched and in place.
+
+https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-delete-repository[Endpoint documentation]
+[source,ts]
+----
+client.snapshot.deleteRepository({ repository })
+----
+
+[discrete]
+==== Arguments
+
+* *Request (object):*
+** *`repository` (string | string[])*: The name of the snapshot repositories to unregister.
+Wildcard (`*`) patterns are supported.
+** *`master_timeout` (Optional, string | -1 | 0)*: The period to wait for the master node.
+If the master node is not available before the timeout expires, the request fails and returns an error.
+To indicate that the request should never timeout, set it to `-1`.
+** *`timeout` (Optional, string | -1 | 0)*: The period to wait for a response from all relevant nodes in the cluster after updating the cluster metadata. +If no response is received before the timeout expires, the cluster metadata update still applies but the response will indicate that it was not completely acknowledged. +To indicate that the request should never timeout, set it to `-1`. + +[discrete] +==== get +Get snapshot information. + +NOTE: The `after` parameter and `next` field enable you to iterate through snapshots with some consistency guarantees regarding concurrent creation or deletion of snapshots. +It is guaranteed that any snapshot that exists at the beginning of the iteration and is not concurrently deleted will be seen during the iteration. +Snapshots concurrently created may be seen during an iteration. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-get[Endpoint documentation] +[source,ts] +---- +client.snapshot.get({ repository, snapshot }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`repository` (string)*: A list of snapshot repository names used to limit the request. +Wildcard (`*`) expressions are supported. +** *`snapshot` (string | string[])*: A list of snapshot names to retrieve +Wildcards (`*`) are supported. + +* To get information about all snapshots in a registered repository, use a wildcard (`*`) or `_all`. +* To get information about any snapshots that are currently running, use `_current`. +** *`after` (Optional, string)*: An offset identifier to start pagination from as returned by the next field in the response body. +** *`from_sort_value` (Optional, string)*: The value of the current sort column at which to start retrieval. +It can be a string `snapshot-` or a repository name when sorting by snapshot or repository name. +It can be a millisecond time value or a number when sorting by `index-` or shard count. +** *`ignore_unavailable` (Optional, boolean)*: If `false`, the request returns an error for any snapshots that are unavailable. +** *`index_details` (Optional, boolean)*: If `true`, the response includes additional information about each index in the snapshot comprising the number of shards in the index, the total size of the index in bytes, and the maximum number of segments per shard in the index. +The default is `false`, meaning that this information is omitted. +** *`index_names` (Optional, boolean)*: If `true`, the response includes the name of each index in each snapshot. +** *`include_repository` (Optional, boolean)*: If `true`, the response includes the repository name in each snapshot. +** *`master_timeout` (Optional, string | -1 | 0)*: The period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. +** *`order` (Optional, Enum("asc" | "desc"))*: The sort order. +Valid values are `asc` for ascending and `desc` for descending order. +The default behavior is ascending order. +** *`offset` (Optional, number)*: Numeric offset to start pagination from based on the snapshots matching this request. Using a non-zero value for this parameter is mutually exclusive with using the after parameter. Defaults to 0. +** *`size` (Optional, number)*: The maximum number of snapshots to return. +The default is 0, which means to return all that match the request without limit. 
+** *`slm_policy_filter` (Optional, string)*: Filter snapshots by a list of snapshot lifecycle management (SLM) policy names that snapshots belong to. + +You can use wildcards (`*`) and combinations of wildcards followed by exclude patterns starting with `-`. +For example, the pattern `*,-policy-a-\*` will return all snapshots except for those that were created by an SLM policy with a name starting with `policy-a-`. +Note that the wildcard pattern `*` matches all snapshots created by an SLM policy but not those snapshots that were not created by an SLM policy. +To include snapshots that were not created by an SLM policy, you can use the special pattern `_none` that will match all snapshots without an SLM policy. +** *`sort` (Optional, Enum("start_time" | "duration" | "name" | "index_count" | "repository" | "shard_count" | "failed_shard_count"))*: The sort order for the result. +The default behavior is sorting by snapshot start time stamp. +** *`verbose` (Optional, boolean)*: If `true`, returns additional information about each snapshot such as the version of Elasticsearch which took the snapshot, the start and end times of the snapshot, and the number of shards snapshotted. + +NOTE: The parameters `size`, `order`, `after`, `from_sort_value`, `offset`, `slm_policy_filter`, and `sort` are not supported when you set `verbose=false` and the sort order for requests with `verbose=false` is undefined. + +[discrete] +==== get_repository +Get snapshot repository information. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-get-repository[Endpoint documentation] +[source,ts] +---- +client.snapshot.getRepository({ ... }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`repository` (Optional, string | string[])*: A list of snapshot repository names used to limit the request. +Wildcard (`*`) expressions are supported including combining wildcards with exclude patterns starting with `-`. + +To get information about all snapshot repositories registered in the cluster, omit this parameter or use `*` or `_all`. +** *`local` (Optional, boolean)*: If `true`, the request gets information from the local node only. +If `false`, the request gets information from the master node. +** *`master_timeout` (Optional, string | -1 | 0)*: The period to wait for the master node. +If the master node is not available before the timeout expires, the request fails and returns an error. +To indicate that the request should never timeout, set it to `-1`. + +[discrete] +==== repository_analyze +Analyze a snapshot repository. +Analyze the performance characteristics and any incorrect behaviour found in a repository. + +The response exposes implementation details of the analysis which may change from version to version. +The response body format is therefore not considered stable and may be different in newer versions. + +There are a large number of third-party storage systems available, not all of which are suitable for use as a snapshot repository by Elasticsearch. +Some storage systems behave incorrectly, or perform poorly, especially when accessed concurrently by multiple clients as the nodes of an Elasticsearch cluster do. This API performs a collection of read and write operations on your repository which are designed to detect incorrect behaviour and to measure the performance characteristics of your storage system. 
+ +The default values for the parameters are deliberately low to reduce the impact of running an analysis inadvertently and to provide a sensible starting point for your investigations. +Run your first analysis with the default parameter values to check for simple problems. +If successful, run a sequence of increasingly large analyses until you encounter a failure or you reach a `blob_count` of at least `2000`, a `max_blob_size` of at least `2gb`, a `max_total_data_size` of at least `1tb`, and a `register_operation_count` of at least `100`. +Always specify a generous timeout, possibly `1h` or longer, to allow time for each analysis to run to completion. +Perform the analyses using a multi-node cluster of a similar size to your production cluster so that it can detect any problems that only arise when the repository is accessed by many nodes at once. + +If the analysis fails, Elasticsearch detected that your repository behaved unexpectedly. +This usually means you are using a third-party storage system with an incorrect or incompatible implementation of the API it claims to support. +If so, this storage system is not suitable for use as a snapshot repository. +You will need to work with the supplier of your storage system to address the incompatibilities that Elasticsearch detects. + +If the analysis is successful, the API returns details of the testing process, optionally including how long each operation took. +You can use this information to determine the performance of your storage system. +If any operation fails or returns an incorrect result, the API returns an error. +If the API returns an error, it may not have removed all the data it wrote to the repository. +The error will indicate the location of any leftover data and this path is also recorded in the Elasticsearch logs. +You should verify that this location has been cleaned up correctly. +If there is still leftover data at the specified location, you should manually remove it. + +If the connection from your client to Elasticsearch is closed while the client is waiting for the result of the analysis, the test is cancelled. +Some clients are configured to close their connection if no response is received within a certain timeout. +An analysis takes a long time to complete so you might need to relax any such client-side timeouts. +On cancellation the analysis attempts to clean up the data it was writing, but it may not be able to remove it all. +The path to the leftover data is recorded in the Elasticsearch logs. +You should verify that this location has been cleaned up correctly. +If there is still leftover data at the specified location, you should manually remove it. + +If the analysis is successful then it detected no incorrect behaviour, but this does not mean that correct behaviour is guaranteed. +The analysis attempts to detect common bugs but it does not offer 100% coverage. +Additionally, it does not test the following: + +* Your repository must perform durable writes. Once a blob has been written it must remain in place until it is deleted, even after a power loss or similar disaster. +* Your repository must not suffer from silent data corruption. Once a blob has been written, its contents must remain unchanged until it is deliberately modified or deleted. +* Your repository must behave correctly even if connectivity from the cluster is disrupted. Reads and writes may fail in this case, but they must not return incorrect results. 
+ +IMPORTANT: An analysis writes a substantial amount of data to your repository and then reads it back again. +This consumes bandwidth on the network between the cluster and the repository, and storage space and I/O bandwidth on the repository itself. +You must ensure this load does not affect other users of these systems. +Analyses respect the repository settings `max_snapshot_bytes_per_sec` and `max_restore_bytes_per_sec` if available and the cluster setting `indices.recovery.max_bytes_per_sec` which you can use to limit the bandwidth they consume. + +NOTE: This API is intended for exploratory use by humans. You should expect the request parameters and the response format to vary in future versions. + +NOTE: Different versions of Elasticsearch may perform different checks for repository compatibility, with newer versions typically being stricter than older ones. +A storage system that passes repository analysis with one version of Elasticsearch may fail with a different version. +This indicates it behaves incorrectly in ways that the former version did not detect. +You must work with the supplier of your storage system to address the incompatibilities detected by the repository analysis API in any version of Elasticsearch. + +NOTE: This API may not work correctly in a mixed-version cluster. + +*Implementation details* + +NOTE: This section of documentation describes how the repository analysis API works in this version of Elasticsearch, but you should expect the implementation to vary between versions. The request parameters and response format depend on details of the implementation so may also be different in newer versions. + +The analysis comprises a number of blob-level tasks, as set by the `blob_count` parameter and a number of compare-and-exchange operations on linearizable registers, as set by the `register_operation_count` parameter. +These tasks are distributed over the data and master-eligible nodes in the cluster for execution. + +For most blob-level tasks, the executing node first writes a blob to the repository and then instructs some of the other nodes in the cluster to attempt to read the data it just wrote. +The size of the blob is chosen randomly, according to the `max_blob_size` and `max_total_data_size` parameters. +If any of these reads fails then the repository does not implement the necessary read-after-write semantics that Elasticsearch requires. + +For some blob-level tasks, the executing node will instruct some of its peers to attempt to read the data before the writing process completes. +These reads are permitted to fail, but must not return partial data. +If any read returns partial data then the repository does not implement the necessary atomicity semantics that Elasticsearch requires. + +For some blob-level tasks, the executing node will overwrite the blob while its peers are reading it. +In this case the data read may come from either the original or the overwritten blob, but the read operation must not return partial data or a mix of data from the two blobs. +If any of these reads returns partial data or a mix of the two blobs then the repository does not implement the necessary atomicity semantics that Elasticsearch requires for overwrites. + +The executing node will use a variety of different methods to write the blob. +For instance, where applicable, it will use both single-part and multi-part uploads. +Similarly, the reading nodes will use a variety of different methods to read the data back again. 
+For instance they may read the entire blob from start to end or may read only a subset of the data. + +For some blob-level tasks, the executing node will cancel the write before it is complete. +In this case, it still instructs some of the other nodes in the cluster to attempt to read the blob but all of these reads must fail to find the blob. + +Linearizable registers are special blobs that Elasticsearch manipulates using an atomic compare-and-exchange operation. +This operation ensures correct and strongly-consistent behavior even when the blob is accessed by multiple nodes at the same time. +The detailed implementation of the compare-and-exchange operation on linearizable registers varies by repository type. +Repository analysis verifies that uncontended compare-and-exchange operations on a linearizable register blob always succeed. +Repository analysis also verifies that contended operations either succeed or report the contention but do not return incorrect results. +If an operation fails due to contention, Elasticsearch retries the operation until it succeeds. +Most of the compare-and-exchange operations performed by repository analysis atomically increment a counter which is represented as an 8-byte blob. +Some operations also verify the behavior on small blobs with sizes other than 8 bytes. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-repository-analyze[Endpoint documentation] +[source,ts] +---- +client.snapshot.repositoryAnalyze({ repository }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`repository` (string)*: The name of the repository. +** *`blob_count` (Optional, number)*: The total number of blobs to write to the repository during the test. +For realistic experiments, you should set it to at least `2000`. +** *`concurrency` (Optional, number)*: The number of operations to run concurrently during the test. +** *`detailed` (Optional, boolean)*: Indicates whether to return detailed results, including timing information for every operation performed during the analysis. +If `false`, it returns only a summary of the analysis. +** *`early_read_node_count` (Optional, number)*: The number of nodes on which to perform an early read operation while writing each blob. +Early read operations are only rarely performed. +** *`max_blob_size` (Optional, number | string)*: The maximum size of a blob to be written during the test. +For realistic experiments, you should set it to at least `2gb`. +** *`max_total_data_size` (Optional, number | string)*: An upper limit on the total size of all the blobs written during the test. +For realistic experiments, you should set it to at least `1tb`. +** *`rare_action_probability` (Optional, number)*: The probability of performing a rare action such as an early read, an overwrite, or an aborted write on each blob. +** *`rarely_abort_writes` (Optional, boolean)*: Indicates whether to rarely cancel writes before they complete. +** *`read_node_count` (Optional, number)*: The number of nodes on which to read a blob after writing. +** *`register_operation_count` (Optional, number)*: The minimum number of linearizable register operations to perform in total. +For realistic experiments, you should set it to at least `100`. +** *`seed` (Optional, number)*: The seed for the pseudo-random number generator used to generate the list of operations performed during the test. +To repeat the same set of operations in multiple experiments, use the same seed in each experiment.
+Note that the operations are performed concurrently so they might not always happen in the same order on each run. +** *`timeout` (Optional, string | -1 | 0)*: The period of time to wait for the test to complete. +If no response is received before the timeout expires, the test is cancelled and returns an error. + +[discrete] +==== restore +Restore a snapshot. +Restore a snapshot of a cluster or data streams and indices. + +You can restore a snapshot only to a running cluster with an elected master node. +The snapshot repository must be registered and available to the cluster. +The snapshot and cluster versions must be compatible. + +To restore a snapshot, the cluster's global metadata must be writable. Ensure there aren't any cluster blocks that prevent writes. The restore operation ignores index blocks. + +Before you restore a data stream, ensure the cluster contains a matching index template with data streams enabled. To check, use the index management feature in Kibana or the get index template API: + +---- +GET _index_template/*?filter_path=index_templates.name,index_templates.index_template.index_patterns,index_templates.index_template.data_stream +---- + +If no such template exists, you can create one or restore a cluster state that contains one. Without a matching index template, a data stream can't roll over or create backing indices. + +If your snapshot contains data from App Search or Workplace Search, you must restore the Enterprise Search encryption key before you restore the snapshot. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-restore[Endpoint documentation] +[source,ts] +---- +client.snapshot.restore({ repository, snapshot }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`repository` (string)*: The name of the repository to restore a snapshot from. +** *`snapshot` (string)*: The name of the snapshot to restore. +** *`feature_states` (Optional, string[])*: The feature states to restore. +If `include_global_state` is `true`, the request restores all feature states in the snapshot by default. +If `include_global_state` is `false`, the request restores no feature states by default. +Note that specifying an empty array will result in the default behavior. +To restore no feature states, regardless of the `include_global_state` value, specify an array containing only the value `none` (`["none"]`). +** *`ignore_index_settings` (Optional, string[])*: The index settings to not restore from the snapshot. +You can't use this option to ignore `index.number_of_shards`. + +For data streams, this option applies only to restored backing indices. +New backing indices are configured using the data stream's matching index template. +** *`ignore_unavailable` (Optional, boolean)*: If `true`, the request ignores any index or data stream in indices that's missing from the snapshot. +If `false`, the request returns an error for any missing index or data stream. +** *`include_aliases` (Optional, boolean)*: If `true`, the request restores aliases for any restored data streams and indices. +If `false`, the request doesn’t restore aliases. +** *`include_global_state` (Optional, boolean)*: If `true`, restore the cluster state.
The cluster state includes: + +* Persistent cluster settings +* Index templates +* Legacy index templates +* Ingest pipelines +* Index lifecycle management (ILM) policies +* Stored scripts +* For snapshots taken after 7.12.0, feature states + +If `include_global_state` is `true`, the restore operation merges the legacy index templates in your cluster with the templates contained in the snapshot, replacing any existing ones whose name matches one in the snapshot. +It completely removes all persistent settings, non-legacy index templates, ingest pipelines, and ILM lifecycle policies that exist in your cluster and replaces them with the corresponding items from the snapshot. + +Use the `feature_states` parameter to configure how feature states are restored. + +If `include_global_state` is `true` and a snapshot was created without a global state then the restore request will fail. +** *`index_settings` (Optional, { index, mode, routing_path, soft_deletes, sort, number_of_shards, number_of_replicas, number_of_routing_shards, check_on_startup, codec, routing_partition_size, load_fixed_bitset_filters_eagerly, hidden, auto_expand_replicas, merge, search, refresh_interval, max_result_window, max_inner_result_window, max_rescore_window, max_docvalue_fields_search, max_script_fields, max_ngram_diff, max_shingle_diff, blocks, max_refresh_listeners, analyze, highlight, max_terms_count, max_regex_length, routing, gc_deletes, default_pipeline, final_pipeline, lifecycle, provided_name, creation_date, creation_date_string, uuid, version, verified_before_close, format, max_slices_per_scroll, translog, query_string, priority, top_metrics_max_size, analysis, settings, time_series, queries, similarity, mapping, indexing.slowlog, indexing_pressure, store })*: Index settings to add or change in restored indices, including backing indices. +You can't use this option to change `index.number_of_shards`. + +For data streams, this option applies only to restored backing indices. +New backing indices are configured using the data stream's matching index template. +** *`indices` (Optional, string | string[])*: A list of indices and data streams to restore. +It supports a multi-target syntax. +The default behavior is all regular indices and regular data streams in the snapshot. + +You can't use this parameter to restore system indices or system data streams. +Use `feature_states` instead. +** *`partial` (Optional, boolean)*: If `false`, the entire restore operation will fail if one or more indices included in the snapshot do not have all primary shards available. + +If true, it allows restoring a partial snapshot of indices with unavailable shards. +Only shards that were successfully included in the snapshot will be restored. +All missing shards will be recreated as empty. +** *`rename_pattern` (Optional, string)*: A rename pattern to apply to restored data streams and indices. +Data streams and indices matching the rename pattern will be renamed according to `rename_replacement`. + +The rename pattern is applied as defined by the regular expression that supports referencing the original text, according to the `appendReplacement` logic. +** *`rename_replacement` (Optional, string)*: The rename replacement string that is used with the `rename_pattern`. +** *`master_timeout` (Optional, string | -1 | 0)*: The period to wait for the master node. +If the master node is not available before the timeout expires, the request fails and returns an error. +To indicate that the request should never timeout, set it to `-1`. 
+** *`wait_for_completion` (Optional, boolean)*: If `true`, the request returns a response when the restore operation completes. +The operation is complete when it finishes all attempts to recover primary shards for restored indices. +This applies even if one or more of the recovery attempts fail. + +If `false`, the request returns a response when the restore operation initializes. + +[discrete] +==== status +Get the snapshot status. +Get a detailed description of the current state for each shard participating in the snapshot. + +Note that this API should be used only to obtain detailed shard-level information for ongoing snapshots. +If this detail is not needed or you want to obtain information about one or more existing snapshots, use the get snapshot API. + +If you omit the `<snapshot>` request path parameter, the request retrieves information only for currently running snapshots. +This usage is preferred. +If needed, you can specify `<repository>` and `<snapshot>` to retrieve information for specific snapshots, even if they're not currently running. + +WARNING: Using the API to return the status of any snapshots other than currently running snapshots can be expensive. +The API requires a read from the repository for each shard in each snapshot. +For example, if you have 100 snapshots with 1,000 shards each, an API request that includes all snapshots will require 100,000 reads (100 snapshots x 1,000 shards). + +Depending on the latency of your storage, such requests can take an extremely long time to return results. +These requests can also tax machine resources and, when using cloud storage, incur high processing costs. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-status[Endpoint documentation] +[source,ts] +---- +client.snapshot.status({ ... }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`repository` (Optional, string)*: The snapshot repository name used to limit the request. +It supports wildcards (`*`) if `<snapshot>` isn't specified. +** *`snapshot` (Optional, string | string[])*: A list of snapshots to retrieve status for. +The default is currently running snapshots. +Wildcards (`*`) are not supported. +** *`ignore_unavailable` (Optional, boolean)*: If `false`, the request returns an error for any snapshots that are unavailable. +If `true`, the request ignores snapshots that are unavailable, such as those that are corrupted or temporarily cannot be returned. +** *`master_timeout` (Optional, string | -1 | 0)*: The period to wait for the master node. +If the master node is not available before the timeout expires, the request fails and returns an error. +To indicate that the request should never timeout, set it to `-1`. + +[discrete] +==== verify_repository +Verify a snapshot repository. +Check for common misconfigurations in a snapshot repository. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-verify-repository[Endpoint documentation] +[source,ts] +---- +client.snapshot.verifyRepository({ repository }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`repository` (string)*: The name of the snapshot repository to verify. +** *`master_timeout` (Optional, string | -1 | 0)*: The period to wait for the master node. +If the master node is not available before the timeout expires, the request fails and returns an error. +To indicate that the request should never timeout, set it to `-1`.
+** *`timeout` (Optional, string | -1 | 0)*: The period to wait for a response from all relevant nodes in the cluster after updating the cluster metadata. +If no response is received before the timeout expires, the cluster metadata update still applies but the response will indicate that it was not completely acknowledged. +To indicate that the request should never timeout, set it to `-1`. + +[discrete] +=== sql +[discrete] +==== clear_cursor +Clear an SQL search cursor. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-sql-clear-cursor[Endpoint documentation] +[source,ts] +---- +client.sql.clearCursor({ cursor }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`cursor` (string)*: Cursor to clear. + +[discrete] +==== delete_async +Delete an async SQL search. +Delete an async SQL search or a stored synchronous SQL search. +If the search is still running, the API cancels it. + +If the Elasticsearch security features are enabled, only the following users can use this API to delete a search: + +* Users with the `cancel_task` cluster privilege. +* The user who first submitted the search. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-sql-delete-async[Endpoint documentation] +[source,ts] +---- +client.sql.deleteAsync({ id }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`id` (string)*: The identifier for the search. + +[discrete] +==== get_async +Get async SQL search results. +Get the current status and available results for an async SQL search or stored synchronous SQL search. + +If the Elasticsearch security features are enabled, only the user who first submitted the SQL search can retrieve the search using this API. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-sql-get-async[Endpoint documentation] +[source,ts] +---- +client.sql.getAsync({ id }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`id` (string)*: The identifier for the search. +** *`delimiter` (Optional, string)*: The separator for CSV results. +The API supports this parameter only for CSV responses. +** *`format` (Optional, string)*: The format for the response. +You must specify a format using this parameter or the `Accept` HTTP header. +If you specify both, the API uses this parameter. +** *`keep_alive` (Optional, string | -1 | 0)*: The retention period for the search and its results. +It defaults to the `keep_alive` period for the original SQL search. +** *`wait_for_completion_timeout` (Optional, string | -1 | 0)*: The period to wait for complete results. +It defaults to no timeout, meaning the request waits for complete search results. + +[discrete] +==== get_async_status +Get the async SQL search status. +Get the current status of an async SQL search or a stored synchronous SQL search. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-sql-get-async-status[Endpoint documentation] +[source,ts] +---- +client.sql.getAsyncStatus({ id }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`id` (string)*: The identifier for the search. + +[discrete] +==== query +Get SQL search results. +Run an SQL request. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-sql-query[Endpoint documentation] +[source,ts] +---- +client.sql.query({ ... 
}) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`allow_partial_search_results` (Optional, boolean)*: If `true`, the response has partial results when there are shard request timeouts or shard failures. +If `false`, the API returns an error with no partial results. +** *`catalog` (Optional, string)*: The default catalog (cluster) for queries. +If unspecified, the queries execute on the data in the local cluster only. +** *`columnar` (Optional, boolean)*: If `true`, the results are in a columnar fashion: one row represents all the values of a certain column from the current page of results. +The API supports this parameter only for CBOR, JSON, SMILE, and YAML responses. +** *`cursor` (Optional, string)*: The cursor used to retrieve a set of paginated results. +If you specify a cursor, the API only uses the `columnar` and `time_zone` request body parameters. +It ignores other request body parameters. +** *`fetch_size` (Optional, number)*: The maximum number of rows (or entries) to return in one response. +** *`field_multi_value_leniency` (Optional, boolean)*: If `false`, the API returns an exception when encountering multiple values for a field. +If `true`, the API is lenient and returns the first value from the array with no guarantee of consistent results. +** *`filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: The Elasticsearch query DSL for additional filtering. +** *`index_using_frozen` (Optional, boolean)*: If `true`, the search can run on frozen indices. +** *`keep_alive` (Optional, string | -1 | 0)*: The retention period for an async or saved synchronous search. +** *`keep_on_completion` (Optional, boolean)*: If `true`, Elasticsearch stores synchronous searches if you also specify the `wait_for_completion_timeout` parameter. +If `false`, Elasticsearch only stores async searches that don't finish before the `wait_for_completion_timeout`. +** *`page_timeout` (Optional, string | -1 | 0)*: The minimum retention period for the scroll cursor. +After this time period, a pagination request might fail because the scroll cursor is no longer available. +Subsequent scroll requests prolong the lifetime of the scroll cursor by the duration of `page_timeout` in the scroll request. +** *`params` (Optional, Record)*: The values for parameters in the query. +** *`query` (Optional, string)*: The SQL query to run. +** *`request_timeout` (Optional, string | -1 | 0)*: The timeout before the request fails. +** *`runtime_mappings` (Optional, Record)*: One or more runtime fields for the search request. +These fields take precedence over mapped fields with the same name. +** *`time_zone` (Optional, string)*: The ISO-8601 time zone ID for the search. +** *`wait_for_completion_timeout` (Optional, string | -1 | 0)*: The period to wait for complete results. 
+It defaults to no timeout, meaning the request waits for complete search results. +If the search doesn't finish within this period, the search becomes async. + +To save a synchronous search, you must specify this parameter and the `keep_on_completion` parameter. +** *`format` (Optional, Enum("csv" | "json" | "tsv" | "txt" | "yaml" | "cbor" | "smile"))*: The format for the response. +You can also specify a format using the `Accept` HTTP header. +If you specify both this parameter and the `Accept` HTTP header, this parameter takes precedence. + +[discrete] +==== translate +Translate SQL into Elasticsearch queries. +Translate an SQL search into a search API request containing Query DSL. +It accepts the same request body parameters as the SQL search API, excluding `cursor`. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-sql-translate[Endpoint documentation] +[source,ts] +---- +client.sql.translate({ query }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`query` (string)*: The SQL query to run. +** *`fetch_size` (Optional, number)*: The maximum number of rows (or entries) to return in one response. +** *`filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: The Elasticsearch query DSL for additional filtering. +** *`time_zone` (Optional, string)*: The ISO-8601 time zone ID for the search. + +[discrete] +=== ssl +[discrete] +==== certificates +Get SSL certificates. + +Get information about the X.509 certificates that are used to encrypt communications in the cluster. +The API returns a list that includes certificates from all TLS contexts including: + +- Settings for transport and HTTP interfaces +- TLS settings that are used within authentication realms +- TLS settings for remote monitoring exporters + +The list includes certificates that are used for configuring trust, such as those configured in the `xpack.security.transport.ssl.truststore` and `xpack.security.transport.ssl.certificate_authorities` settings. +It also includes certificates that are used for configuring server identity, such as `xpack.security.http.ssl.keystore` and `xpack.security.http.ssl.certificate settings`. + +The list does not include certificates that are sourced from the default SSL context of the Java Runtime Environment (JRE), even if those certificates are in use within Elasticsearch. + +NOTE: When a PKCS#11 token is configured as the truststore of the JRE, the API returns all the certificates that are included in the PKCS#11 token irrespective of whether these are used in the Elasticsearch TLS configuration. + +If Elasticsearch is configured to use a keystore or truststore, the API output includes all certificates in that store, even though some of the certificates might not be in active use within the cluster. 
+ +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ssl-certificates[Endpoint documentation] +[source,ts] +---- +client.ssl.certificates() +---- + + +[discrete] +=== synonyms +[discrete] +==== delete_synonym +Delete a synonym set. + +You can only delete a synonyms set that is not in use by any index analyzer. + +Synonyms sets can be used in synonym graph token filters and synonym token filters. +These synonym filters can be used as part of search analyzers. + +Analyzers need to be loaded when an index is restored (such as when a node starts, or the index becomes open). +Even if the analyzer is not used on any field mapping, it still needs to be loaded on the index recovery phase. + +If any analyzers cannot be loaded, the index becomes unavailable and the cluster status becomes red or yellow as index shards are not available. +To prevent that, synonyms sets that are used in analyzers can't be deleted. +A delete request in this case will return a 400 response code. + +To remove a synonyms set, you must first remove all indices that contain analyzers using it. +You can migrate an index by creating a new index that does not contain the token filter with the synonyms set, and use the reindex API in order to copy over the index data. +Once finished, you can delete the index. +When the synonyms set is not used in analyzers, you will be able to delete it. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-synonyms-delete-synonym[Endpoint documentation] +[source,ts] +---- +client.synonyms.deleteSynonym({ id }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`id` (string)*: The synonyms set identifier to delete. + +[discrete] +==== delete_synonym_rule +Delete a synonym rule. +Delete a synonym rule from a synonym set. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-synonyms-delete-synonym-rule[Endpoint documentation] +[source,ts] +---- +client.synonyms.deleteSynonymRule({ set_id, rule_id }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`set_id` (string)*: The ID of the synonym set to update. +** *`rule_id` (string)*: The ID of the synonym rule to delete. + +[discrete] +==== get_synonym +Get a synonym set. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-synonyms-get-synonym[Endpoint documentation] +[source,ts] +---- +client.synonyms.getSynonym({ id }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`id` (string)*: The synonyms set identifier to retrieve. +** *`from` (Optional, number)*: The starting offset for query rules to retrieve. +** *`size` (Optional, number)*: The max number of query rules to retrieve. + +[discrete] +==== get_synonym_rule +Get a synonym rule. +Get a synonym rule from a synonym set. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-synonyms-get-synonym-rule[Endpoint documentation] +[source,ts] +---- +client.synonyms.getSynonymRule({ set_id, rule_id }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`set_id` (string)*: The ID of the synonym set to retrieve the synonym rule from. +** *`rule_id` (string)*: The ID of the synonym rule to retrieve. + +[discrete] +==== get_synonyms_sets +Get all synonym sets. +Get a summary of all defined synonym sets. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-synonyms-get-synonym[Endpoint documentation] +[source,ts] +---- +client.synonyms.getSynonymsSets({ ... 
}) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`from` (Optional, number)*: The starting offset for synonyms sets to retrieve. +** *`size` (Optional, number)*: The maximum number of synonyms sets to retrieve. + +[discrete] +==== put_synonym +Create or update a synonym set. +Synonyms sets are limited to a maximum of 10,000 synonym rules per set. +If you need to manage more synonym rules, you can create multiple synonym sets. + +When an existing synonyms set is updated, the search analyzers that use the synonyms set are reloaded automatically for all indices. +This is equivalent to invoking the reload search analyzers API for all indices that use the synonyms set. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-synonyms-put-synonym[Endpoint documentation] +[source,ts] +---- +client.synonyms.putSynonym({ id, synonyms_set }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`id` (string)*: The ID of the synonyms set to be created or updated. +** *`synonyms_set` ({ id, synonyms } | { id, synonyms }[])*: The synonym rules definitions for the synonyms set. + +[discrete] +==== put_synonym_rule +Create or update a synonym rule. +Create or update a synonym rule in a synonym set. + +If any of the synonym rules included is invalid, the API returns an error. + +When you update a synonym rule, all analyzers using the synonyms set will be reloaded automatically to reflect the new rule. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-synonyms-put-synonym-rule[Endpoint documentation] +[source,ts] +---- +client.synonyms.putSynonymRule({ set_id, rule_id, synonyms }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`set_id` (string)*: The ID of the synonym set. +** *`rule_id` (string)*: The ID of the synonym rule to be updated or created. +** *`synonyms` (string)*: The synonym rule information definition, which must be in Solr format. + +[discrete] +=== tasks +[discrete] +==== cancel +Cancel a task. + +WARNING: The task management API is new and should still be considered a beta feature. +The API may change in ways that are not backwards compatible. + +A task may continue to run for some time after it has been cancelled because it may not be able to safely stop its current activity straight away. +It is also possible that Elasticsearch must complete its work on other tasks before it can process the cancellation. +The get task information API will continue to list these cancelled tasks until they complete. +The cancelled flag in the response indicates that the cancellation command has been processed and the task will stop as soon as possible. + +To troubleshoot why a cancelled task does not complete promptly, use the get task information API with the `?detailed` parameter to identify the other tasks the system is running. +You can also use the node hot threads API to obtain detailed information about the work the system is doing instead of completing the cancelled task. + +https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-tasks[Endpoint documentation] +[source,ts] +---- +client.tasks.cancel({ ... }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`task_id` (Optional, string | number)*: The task identifier. +** *`actions` (Optional, string | string[])*: A list or wildcard expression of actions that is used to limit the request. +** *`nodes` (Optional, string[])*: A list of node IDs or names that is used to limit the request. 
+** *`parent_task_id` (Optional, string)*: A parent task ID that is used to limit the tasks. +** *`wait_for_completion` (Optional, boolean)*: If `true`, the request blocks until all found tasks are complete. + +[discrete] +==== get +Get task information. +Get information about a task currently running in the cluster. + +WARNING: The task management API is new and should still be considered a beta feature. +The API may change in ways that are not backwards compatible. + +If the task identifier is not found, a 404 response code indicates that there are no resources that match the request. + +https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-tasks[Endpoint documentation] +[source,ts] +---- +client.tasks.get({ task_id }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`task_id` (string)*: The task identifier. +** *`timeout` (Optional, string | -1 | 0)*: The period to wait for a response. +If no response is received before the timeout expires, the request fails and returns an error. +** *`wait_for_completion` (Optional, boolean)*: If `true`, the request blocks until the task has completed. + +[discrete] +==== list +Get all tasks. +Get information about the tasks currently running on one or more nodes in the cluster. + +WARNING: The task management API is new and should still be considered a beta feature. +The API may change in ways that are not backwards compatible. + +**Identifying running tasks** + +The `X-Opaque-Id` header, when provided on the HTTP request header, is returned as a header in the response as well as in the `headers` field of the task information. +This enables you to track certain calls or associate certain tasks with the client that started them. +For example: + +---- +curl -i -H "X-Opaque-Id: 123456" "http://localhost:9200/_tasks?group_by=parents" +---- + +The API returns the following result: + +---- +HTTP/1.1 200 OK +X-Opaque-Id: 123456 +content-type: application/json; charset=UTF-8 +content-length: 831 + +{ + "tasks" : { + "u5lcZHqcQhu-rUoFaqDphA:45" : { + "node" : "u5lcZHqcQhu-rUoFaqDphA", + "id" : 45, + "type" : "transport", + "action" : "cluster:monitor/tasks/lists", + "start_time_in_millis" : 1513823752749, + "running_time_in_nanos" : 293139, + "cancellable" : false, + "headers" : { + "X-Opaque-Id" : "123456" + }, + "children" : [ + { + "node" : "u5lcZHqcQhu-rUoFaqDphA", + "id" : 46, + "type" : "direct", + "action" : "cluster:monitor/tasks/lists[n]", + "start_time_in_millis" : 1513823752750, + "running_time_in_nanos" : 92133, + "cancellable" : false, + "parent_task_id" : "u5lcZHqcQhu-rUoFaqDphA:45", + "headers" : { + "X-Opaque-Id" : "123456" + } + } + ] + } + } + } +---- +In this example, `X-Opaque-Id: 123456` is the ID as a part of the response header. +The `X-Opaque-Id` in the task `headers` is the ID for the task that was initiated by the REST request. +The `X-Opaque-Id` in the children `headers` is the child task of the task that was initiated by the REST request. + +https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-tasks[Endpoint documentation] +[source,ts] +---- +client.tasks.list({ ... }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`actions` (Optional, string | string[])*: A list or wildcard expression of actions used to limit the request. +For example, you can use `cluster:*` to retrieve all cluster-related tasks. +** *`detailed` (Optional, boolean)*: If `true`, the response includes detailed information about the running tasks.
+This information is useful to distinguish tasks from each other but is more costly to run. +** *`group_by` (Optional, Enum("nodes" | "parents" | "none"))*: A key that is used to group tasks in the response. +The task lists can be grouped either by nodes or by parent tasks. +** *`nodes` (Optional, string | string[])*: A list of node IDs or names that is used to limit the returned information. +** *`parent_task_id` (Optional, string)*: A parent task identifier that is used to limit returned information. +To return all tasks, omit this parameter or use a value of `-1`. +If the parent task is not found, the API does not return a 404 response code. +** *`timeout` (Optional, string | -1 | 0)*: The period to wait for each node to respond. +If a node does not respond before its timeout expires, the response does not include its information. +However, timed out nodes are included in the `node_failures` property. +** *`wait_for_completion` (Optional, boolean)*: If `true`, the request blocks until the operation is complete. + +[discrete] +=== text_structure +[discrete] +==== find_field_structure +Find the structure of a text field. +Find the structure of a text field in an Elasticsearch index. + +This API provides a starting point for extracting further information from log messages already ingested into Elasticsearch. +For example, if you have ingested data into a very simple index that has just `@timestamp` and message fields, you can use this API to see what common structure exists in the message field. + +The response from the API contains: + +* Sample messages. +* Statistics that reveal the most common values for all fields detected within the text and basic numeric statistics for numeric fields. +* Information about the structure of the text, which is useful when you write ingest configurations to index it or similarly formatted text. +* Appropriate mappings for an Elasticsearch index, which you could use to ingest the text. + +All this information can be calculated by the structure finder with no guidance. +However, you can optionally override some of the decisions about the text structure by specifying one or more query parameters. + +If the structure finder produces unexpected results, specify the `explain` query parameter and an explanation will appear in the response. +It helps determine why the returned structure was chosen. + +https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-text_structure[Endpoint documentation] +[source,ts] +---- +client.textStructure.findFieldStructure({ field, index }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`field` (string)*: The field that should be analyzed. +** *`index` (string)*: The name of the index that contains the analyzed field. +** *`column_names` (Optional, string)*: If `format` is set to `delimited`, you can specify the column names in a list. +If this parameter is not specified, the structure finder uses the column names from the header row of the text. +If the text does not have a header row, columns are named "column1", "column2", "column3", for example. +** *`delimiter` (Optional, string)*: If you have set `format` to `delimited`, you can specify the character used to delimit the values in each row. +Only a single character is supported; the delimiter cannot have multiple characters. +By default, the API considers the following possibilities: comma, tab, semi-colon, and pipe (`|`). +In this default scenario, all rows must have the same number of fields for the delimited format to be detected. 
+If you specify a delimiter, up to 10% of the rows can have a different number of columns than the first row. +** *`documents_to_sample` (Optional, number)*: The number of documents to include in the structural analysis. +The minimum value is 2. +** *`ecs_compatibility` (Optional, Enum("disabled" | "v1"))*: The mode of compatibility with ECS compliant Grok patterns. +Use this parameter to specify whether to use ECS Grok patterns instead of legacy ones when the structure finder creates a Grok pattern. +This setting primarily has an impact when a whole message Grok pattern such as `%{CATALINALOG}` matches the input. +If the structure finder identifies a common structure but has no idea of the meaning then generic field names such as `path`, `ipaddress`, `field1`, and `field2` are used in the `grok_pattern` output. +The intention in that situation is that a user who knows the meanings will rename the fields before using them. +** *`explain` (Optional, boolean)*: If `true`, the response includes a field named `explanation`, which is an array of strings that indicate how the structure finder produced its result. +** *`format` (Optional, Enum("delimited" | "ndjson" | "semi_structured_text" | "xml"))*: The high level structure of the text. +By default, the API chooses the format. +In this default scenario, all rows must have the same number of fields for a delimited format to be detected. +If the format is set to delimited and the delimiter is not set, however, the API tolerates up to 5% of rows that have a different number of columns than the first row. +** *`grok_pattern` (Optional, string)*: If the format is `semi_structured_text`, you can specify a Grok pattern that is used to extract fields from every message in the text. +The name of the timestamp field in the Grok pattern must match what is specified in the `timestamp_field` parameter. +If that parameter is not specified, the name of the timestamp field in the Grok pattern must match "timestamp". +If `grok_pattern` is not specified, the structure finder creates a Grok pattern. +** *`quote` (Optional, string)*: If the format is `delimited`, you can specify the character used to quote the values in each row if they contain newlines or the delimiter character. +Only a single character is supported. +If this parameter is not specified, the default value is a double quote (`"`). +If your delimited text format does not use quoting, a workaround is to set this argument to a character that does not appear anywhere in the sample. +** *`should_trim_fields` (Optional, boolean)*: If the format is `delimited`, you can specify whether values between delimiters should have whitespace trimmed from them. +If this parameter is not specified and the delimiter is pipe (`|`), the default value is true. +Otherwise, the default value is `false`. +** *`timeout` (Optional, string | -1 | 0)*: The maximum amount of time that the structure analysis can take. +If the analysis is still running when the timeout expires, it will be stopped. +** *`timestamp_field` (Optional, string)*: The name of the field that contains the primary timestamp of each record in the text. +In particular, if the text was ingested into an index, this is the field that would be used to populate the `@timestamp` field. + +If the format is `semi_structured_text`, this field must match the name of the appropriate extraction in the `grok_pattern`. +Therefore, for semi-structured text, it is best not to specify this parameter unless `grok_pattern` is also specified. 
+ +For structured text, if you specify this parameter, the field must exist within the text. + +If this parameter is not specified, the structure finder makes a decision about which field (if any) is the primary timestamp field. +For structured text, it is not compulsory to have a timestamp in the text. +** *`timestamp_format` (Optional, string)*: The Java time format of the timestamp field in the text. +Only a subset of Java time format letter groups are supported: + +* `a` +* `d` +* `dd` +* `EEE` +* `EEEE` +* `H` +* `HH` +* `h` +* `M` +* `MM` +* `MMM` +* `MMMM` +* `mm` +* `ss` +* `XX` +* `XXX` +* `yy` +* `yyyy` +* `zzz` + +Additionally `S` letter groups (fractional seconds) of length one to nine are supported providing they occur after `ss` and are separated from the `ss` by a period (`.`), comma (`,`), or colon (`:`). +Spacing and punctuation are also permitted, with the exception of a question mark (`?`), newline, and carriage return, together with literal text enclosed in single quotes. +For example, `MM/dd HH.mm.ss,SSSSSS 'in' yyyy` is a valid override format. + +One valuable use case for this parameter is when the format is semi-structured text, there are multiple timestamp formats in the text, and you know which format corresponds to the primary timestamp, but you do not want to specify the full `grok_pattern`. +Another is when the timestamp format is one that the structure finder does not consider by default. + +If this parameter is not specified, the structure finder chooses the best format from a built-in set. + +If the special value `null` is specified, the structure finder will not look for a primary timestamp in the text. +When the format is semi-structured text, this will result in the structure finder treating the text as single-line messages. + +[discrete] +==== find_message_structure +Find the structure of text messages. +Find the structure of a list of text messages. +The messages must contain data that is suitable to be ingested into Elasticsearch. + +This API provides a starting point for ingesting data into Elasticsearch in a format that is suitable for subsequent use with other Elastic Stack functionality. +Use this API rather than the find text structure API if your input text has already been split up into separate messages by some other process. + +The response from the API contains: + +* Sample messages. +* Statistics that reveal the most common values for all fields detected within the text and basic numeric statistics for numeric fields. +* Information about the structure of the text, which is useful when you write ingest configurations to index it or similarly formatted text. +* Appropriate mappings for an Elasticsearch index, which you could use to ingest the text. + +All this information can be calculated by the structure finder with no guidance. +However, you can optionally override some of the decisions about the text structure by specifying one or more query parameters. + +If the structure finder produces unexpected results, specify the `explain` query parameter and an explanation will appear in the response. +It helps determine why the returned structure was chosen. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-text-structure-find-message-structure[Endpoint documentation] +[source,ts] +---- +client.textStructure.findMessageStructure({ messages }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`messages` (string[])*: The list of messages you want to analyze.
+** *`column_names` (Optional, string)*: If the format is `delimited`, you can specify the column names in a list. +If this parameter is not specified, the structure finder uses the column names from the header row of the text. +If the text does not have a header row, columns are named "column1", "column2", "column3", for example. +** *`delimiter` (Optional, string)*: If the format is `delimited`, you can specify the character used to delimit the values in each row. +Only a single character is supported; the delimiter cannot have multiple characters. +By default, the API considers the following possibilities: comma, tab, semi-colon, and pipe (`|`). +In this default scenario, all rows must have the same number of fields for the delimited format to be detected. +If you specify a delimiter, up to 10% of the rows can have a different number of columns than the first row. +** *`ecs_compatibility` (Optional, Enum("disabled" | "v1"))*: The mode of compatibility with ECS compliant Grok patterns. +Use this parameter to specify whether to use ECS Grok patterns instead of legacy ones when the structure finder creates a Grok pattern. +This setting primarily has an impact when a whole message Grok pattern such as `%{CATALINALOG}` matches the input. +If the structure finder identifies a common structure but has no idea of the meaning, then generic field names such as `path`, `ipaddress`, `field1`, and `field2` are used in the `grok_pattern` output, with the intention that a user who knows the meanings will rename these fields before using them. +** *`explain` (Optional, boolean)*: If this parameter is set to `true`, the response includes a field named `explanation`, which is an array of strings that indicate how the structure finder produced its result. +** *`format` (Optional, Enum("delimited" | "ndjson" | "semi_structured_text" | "xml"))*: The high level structure of the text. +By default, the API chooses the format. +In this default scenario, all rows must have the same number of fields for a delimited format to be detected. +If the format is `delimited` and the delimiter is not set, however, the API tolerates up to 5% of rows that have a different number of columns than the first row. +** *`grok_pattern` (Optional, string)*: If the format is `semi_structured_text`, you can specify a Grok pattern that is used to extract fields from every message in the text. +The name of the timestamp field in the Grok pattern must match what is specified in the `timestamp_field` parameter. +If that parameter is not specified, the name of the timestamp field in the Grok pattern must match "timestamp". +If `grok_pattern` is not specified, the structure finder creates a Grok pattern. +** *`quote` (Optional, string)*: If the format is `delimited`, you can specify the character used to quote the values in each row if they contain newlines or the delimiter character. +Only a single character is supported. +If this parameter is not specified, the default value is a double quote (`"`). +If your delimited text format does not use quoting, a workaround is to set this argument to a character that does not appear anywhere in the sample. +** *`should_trim_fields` (Optional, boolean)*: If the format is `delimited`, you can specify whether values between delimiters should have whitespace trimmed from them. +If this parameter is not specified and the delimiter is pipe (`|`), the default value is `true`. +Otherwise, the default value is `false`.
+** *`timeout` (Optional, string | -1 | 0)*: The maximum amount of time that the structure analysis can take. +If the analysis is still running when the timeout expires, it will be stopped. +** *`timestamp_field` (Optional, string)*: The name of the field that contains the primary timestamp of each record in the text. +In particular, if the text was ingested into an index, this is the field that would be used to populate the `@timestamp` field. + +If the format is `semi_structured_text`, this field must match the name of the appropriate extraction in the `grok_pattern`. +Therefore, for semi-structured text, it is best not to specify this parameter unless `grok_pattern` is also specified. + +For structured text, if you specify this parameter, the field must exist within the text. + +If this parameter is not specified, the structure finder makes a decision about which field (if any) is the primary timestamp field. +For structured text, it is not compulsory to have a timestamp in the text. +** *`timestamp_format` (Optional, string)*: The Java time format of the timestamp field in the text. +Only a subset of Java time format letter groups are supported: + +* `a` +* `d` +* `dd` +* `EEE` +* `EEEE` +* `H` +* `HH` +* `h` +* `M` +* `MM` +* `MMM` +* `MMMM` +* `mm` +* `ss` +* `XX` +* `XXX` +* `yy` +* `yyyy` +* `zzz` + +Additionally `S` letter groups (fractional seconds) of length one to nine are supported providing they occur after `ss` and are separated from the `ss` by a period (`.`), comma (`,`), or colon (`:`). +Spacing and punctuation is also permitted with the exception of a question mark (`?`), newline, and carriage return, together with literal text enclosed in single quotes. +For example, `MM/dd HH.mm.ss,SSSSSS 'in' yyyy` is a valid override format. + +One valuable use case for this parameter is when the format is semi-structured text, there are multiple timestamp formats in the text, and you know which format corresponds to the primary timestamp, but you do not want to specify the full `grok_pattern`. +Another is when the timestamp format is one that the structure finder does not consider by default. + +If this parameter is not specified, the structure finder chooses the best format from a built-in set. + +If the special value `null` is specified, the structure finder will not look for a primary timestamp in the text. +When the format is semi-structured text, this will result in the structure finder treating the text as single-line messages. + +[discrete] +==== find_structure +Find the structure of a text file. +The text file must contain data that is suitable to be ingested into Elasticsearch. + +This API provides a starting point for ingesting data into Elasticsearch in a format that is suitable for subsequent use with other Elastic Stack functionality. +Unlike other Elasticsearch endpoints, the data that is posted to this endpoint does not need to be UTF-8 encoded and in JSON format. +It must, however, be text; binary text formats are not currently supported. +The size is limited to the Elasticsearch HTTP receive buffer size, which defaults to 100 Mb. + +The response from the API contains: + +* A couple of messages from the beginning of the text. +* Statistics that reveal the most common values for all fields detected within the text and basic numeric statistics for numeric fields. +* Information about the structure of the text, which is useful when you write ingest configurations to index it or similarly formatted text.
+* Appropriate mappings for an Elasticsearch index, which you could use to ingest the text. + +All this information can be calculated by the structure finder with no guidance. +However, you can optionally override some of the decisions about the text structure by specifying one or more query parameters. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-text-structure-find-structure[Endpoint documentation] +[source,ts] +---- +client.textStructure.findStructure({ ... }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`text_files` (Optional, TJsonDocument[])* +** *`charset` (Optional, string)*: The text's character set. +It must be a character set that is supported by the JVM that Elasticsearch uses. +For example, `UTF-8`, `UTF-16LE`, `windows-1252`, or `EUC-JP`. +If this parameter is not specified, the structure finder chooses an appropriate character set. +** *`column_names` (Optional, string)*: If you have set `format` to `delimited`, you can specify the column names in a list. +If this parameter is not specified, the structure finder uses the column names from the header row of the text. +If the text does not have a header row, columns are named "column1", "column2", "column3", for example. +** *`delimiter` (Optional, string)*: If you have set `format` to `delimited`, you can specify the character used to delimit the values in each row. +Only a single character is supported; the delimiter cannot have multiple characters. +By default, the API considers the following possibilities: comma, tab, semi-colon, and pipe (`|`). +In this default scenario, all rows must have the same number of fields for the delimited format to be detected. +If you specify a delimiter, up to 10% of the rows can have a different number of columns than the first row. +** *`ecs_compatibility` (Optional, string)*: The mode of compatibility with ECS compliant Grok patterns. +Use this parameter to specify whether to use ECS Grok patterns instead of legacy ones when the structure finder creates a Grok pattern. +Valid values are `disabled` and `v1`. +This setting primarily has an impact when a whole message Grok pattern such as `%{CATALINALOG}` matches the input. +If the structure finder identifies a common structure but has no idea of the meaning, then generic field names such as `path`, `ipaddress`, `field1`, and `field2` are used in the `grok_pattern` output, with the intention that a user who knows the meanings will rename these fields before using them. +** *`explain` (Optional, boolean)*: If this parameter is set to `true`, the response includes a field named `explanation`, which is an array of strings that indicate how the structure finder produced its result. +If the structure finder produces unexpected results for some text, use this query parameter to help you determine why the returned structure was chosen. +** *`format` (Optional, string)*: The high level structure of the text. +Valid values are `ndjson`, `xml`, `delimited`, and `semi_structured_text`. +By default, the API chooses the format. +In this default scenario, all rows must have the same number of fields for a delimited format to be detected. +If the format is set to `delimited` and the delimiter is not set, however, the API tolerates up to 5% of rows that have a different number of columns than the first row. +** *`grok_pattern` (Optional, string)*: If you have set `format` to `semi_structured_text`, you can specify a Grok pattern that is used to extract fields from every message in the text.
+The name of the timestamp field in the Grok pattern must match what is specified in the `timestamp_field` parameter. +If that parameter is not specified, the name of the timestamp field in the Grok pattern must match "timestamp". +If `grok_pattern` is not specified, the structure finder creates a Grok pattern. +** *`has_header_row` (Optional, boolean)*: If you have set `format` to `delimited`, you can use this parameter to indicate whether the column names are in the first row of the text. +If this parameter is not specified, the structure finder guesses based on the similarity of the first row of the text to other rows. +** *`line_merge_size_limit` (Optional, number)*: The maximum number of characters in a message when lines are merged to form messages while analyzing semi-structured text. +If you have extremely long messages you may need to increase this, but be aware that this may lead to very long processing times if the way to group lines into messages is misdetected. +** *`lines_to_sample` (Optional, number)*: The number of lines to include in the structural analysis, starting from the beginning of the text. +The minimum is 2. +If the value of this parameter is greater than the number of lines in the text, the analysis proceeds (as long as there are at least two lines in the text) for all of the lines. + +NOTE: The number of lines and the variation of the lines affects the speed of the analysis. +For example, if you upload text where the first 1000 lines are all variations on the same message, the analysis will find more commonality than would be seen with a bigger sample. +If possible, however, it is more efficient to upload sample text with more variety in the first 1000 lines than to request analysis of 100000 lines to achieve some variety. +** *`quote` (Optional, string)*: If you have set `format` to `delimited`, you can specify the character used to quote the values in each row if they contain newlines or the delimiter character. +Only a single character is supported. +If this parameter is not specified, the default value is a double quote (`"`). +If your delimited text format does not use quoting, a workaround is to set this argument to a character that does not appear anywhere in the sample. +** *`should_trim_fields` (Optional, boolean)*: If you have set `format` to `delimited`, you can specify whether values between delimiters should have whitespace trimmed from them. +If this parameter is not specified and the delimiter is pipe (`|`), the default value is `true`. +Otherwise, the default value is `false`. +** *`timeout` (Optional, string | -1 | 0)*: The maximum amount of time that the structure analysis can take. +If the analysis is still running when the timeout expires then it will be stopped. +** *`timestamp_field` (Optional, string)*: The name of the field that contains the primary timestamp of each record in the text. +In particular, if the text were ingested into an index, this is the field that would be used to populate the `@timestamp` field. + +If the `format` is `semi_structured_text`, this field must match the name of the appropriate extraction in the `grok_pattern`. +Therefore, for semi-structured text, it is best not to specify this parameter unless `grok_pattern` is also specified. + +For structured text, if you specify this parameter, the field must exist within the text. + +If this parameter is not specified, the structure finder makes a decision about which field (if any) is the primary timestamp field. 
+For structured text, it is not compulsory to have a timestamp in the text. +** *`timestamp_format` (Optional, string)*: The Java time format of the timestamp field in the text. + +Only a subset of Java time format letter groups are supported: + +* `a` +* `d` +* `dd` +* `EEE` +* `EEEE` +* `H` +* `HH` +* `h` +* `M` +* `MM` +* `MMM` +* `MMMM` +* `mm` +* `ss` +* `XX` +* `XXX` +* `yy` +* `yyyy` +* `zzz` + +Additionally `S` letter groups (fractional seconds) of length one to nine are supported providing they occur after `ss` and separated from the `ss` by a `.`, `,` or `:`. +Spacing and punctuation is also permitted with the exception of `?`, newline and carriage return, together with literal text enclosed in single quotes. +For example, `MM/dd HH.mm.ss,SSSSSS 'in' yyyy` is a valid override format. + +One valuable use case for this parameter is when the format is semi-structured text, there are multiple timestamp formats in the text, and you know which format corresponds to the primary timestamp, but you do not want to specify the full `grok_pattern`. +Another is when the timestamp format is one that the structure finder does not consider by default. + +If this parameter is not specified, the structure finder chooses the best format from a built-in set. + +If the special value `null` is specified the structure finder will not look for a primary timestamp in the text. +When the format is semi-structured text this will result in the structure finder treating the text as single-line messages. + +[discrete] +==== test_grok_pattern +Test a Grok pattern. +Test a Grok pattern on one or more lines of text. +The API indicates whether the lines match the pattern together with the offsets and lengths of the matched substrings. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-text-structure-test-grok-pattern[Endpoint documentation] +[source,ts] +---- +client.textStructure.testGrokPattern({ grok_pattern, text }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`grok_pattern` (string)*: The Grok pattern to run on the text. +** *`text` (string[])*: The lines of text to run the Grok pattern on. +** *`ecs_compatibility` (Optional, string)*: The mode of compatibility with ECS compliant Grok patterns. +Use this parameter to specify whether to use ECS Grok patterns instead of legacy ones when the structure finder creates a Grok pattern. +Valid values are `disabled` and `v1`. + +[discrete] +=== transform +[discrete] +==== delete_transform +Delete a transform. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-transform-delete-transform[Endpoint documentation] +[source,ts] +---- +client.transform.deleteTransform({ transform_id }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`transform_id` (string)*: Identifier for the transform. +** *`force` (Optional, boolean)*: If this value is false, the transform must be stopped before it can be deleted. If true, the transform is +deleted regardless of its current state. +** *`delete_dest_index` (Optional, boolean)*: If this value is true, the destination index is deleted together with the transform. If false, the destination +index will not be deleted +** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. + +[discrete] +==== get_node_stats +Retrieves transform usage information for transform nodes. 
+[source,ts] +---- +client.transform.getNodeStats() +---- + + +[discrete] +==== get_transform +Get transforms. +Get configuration information for transforms. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-transform-get-transform[Endpoint documentation] +[source,ts] +---- +client.transform.getTransform({ ... }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`transform_id` (Optional, string | string[])*: Identifier for the transform. It can be a transform identifier or a +wildcard expression. You can get information for all transforms by using +`_all`, by specifying `*` as the transform identifier, or by omitting the +identifier. +** *`allow_no_match` (Optional, boolean)*: Specifies what to do when the request: + +. Contains wildcard expressions and there are no transforms that match. +. Contains the _all string or no identifiers and there are no matches. +. Contains wildcard expressions and there are only partial matches. + +If this parameter is false, the request returns a 404 status code when +there are no matches or only partial matches. +** *`from` (Optional, number)*: Skips the specified number of transforms. +** *`size` (Optional, number)*: Specifies the maximum number of transforms to obtain. +** *`exclude_generated` (Optional, boolean)*: Excludes fields that were automatically added when creating the +transform. This allows the configuration to be in an acceptable format to +be retrieved and then added to another cluster. + +[discrete] +==== get_transform_stats +Get transform stats. + +Get usage information for transforms. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-transform-get-transform-stats[Endpoint documentation] +[source,ts] +---- +client.transform.getTransformStats({ transform_id }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`transform_id` (string | string[])*: Identifier for the transform. It can be a transform identifier or a +wildcard expression. You can get information for all transforms by using +`_all`, by specifying `*` as the transform identifier, or by omitting the +identifier. +** *`allow_no_match` (Optional, boolean)*: Specifies what to do when the request: + +. Contains wildcard expressions and there are no transforms that match. +. Contains the _all string or no identifiers and there are no matches. +. Contains wildcard expressions and there are only partial matches. + +If this parameter is false, the request returns a 404 status code when +there are no matches or only partial matches. +** *`from` (Optional, number)*: Skips the specified number of transforms. +** *`size` (Optional, number)*: Specifies the maximum number of transforms to obtain. +** *`timeout` (Optional, string | -1 | 0)*: Controls the time to wait for the stats. + +[discrete] +==== preview_transform +Preview a transform. +Generates a preview of the results that you will get when you create a transform with the same configuration. + +It returns a maximum of 100 results. The calculations are based on all the current data in the source index. It also +generates a list of mappings and settings for the destination index. These values are determined based on the field +types of the source index and the transform aggregations. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-transform-preview-transform[Endpoint documentation] +[source,ts] +---- +client.transform.previewTransform({ ... }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`transform_id` (Optional, string)*: Identifier for the transform to preview.
If you specify this path parameter, you cannot provide transform +configuration details in the request body. +** *`dest` (Optional, { index, op_type, pipeline, routing, version_type })*: The destination for the transform. +** *`description` (Optional, string)*: Free text description of the transform. +** *`frequency` (Optional, string | -1 | 0)*: The interval between checks for changes in the source indices when the +transform is running continuously. Also determines the retry interval in +the event of transient failures while the transform is searching or +indexing. The minimum value is 1s and the maximum is 1h. +** *`pivot` (Optional, { aggregations, group_by })*: The pivot method transforms the data by aggregating and grouping it. +These objects define the group by fields and the aggregation to reduce +the data. +** *`source` (Optional, { index, query, remote, size, slice, sort, _source, runtime_mappings })*: The source of the data for the transform. +** *`settings` (Optional, { align_checkpoints, dates_as_epoch_millis, deduce_mappings, docs_per_second, max_page_search_size, unattended })*: Defines optional transform settings. +** *`sync` (Optional, { time })*: Defines the properties transforms require to run continuously. +** *`retention_policy` (Optional, { time })*: Defines a retention policy for the transform. Data that meets the defined +criteria is deleted from the destination index. +** *`latest` (Optional, { sort, unique_key })*: The latest method transforms the data by finding the latest document for +each unique key. +** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. If no response is received before the +timeout expires, the request fails and returns an error. + +[discrete] +==== put_transform +Create a transform. +Creates a transform. + +A transform copies data from source indices, transforms it, and persists it into an entity-centric destination index. You can also think of the destination index as a two-dimensional tabular data structure (known as +a data frame). The ID for each document in the data frame is generated from a hash of the entity, so there is a +unique row per entity. + +You must choose either the latest or pivot method for your transform; you cannot use both in a single transform. If +you choose to use the pivot method for your transform, the entities are defined by the set of `group_by` fields in +the pivot object. If you choose to use the latest method, the entities are defined by the `unique_key` field values +in the latest object. + +You must have `create_index`, `index`, and `read` privileges on the destination index and `read` and +`view_index_metadata` privileges on the source indices. When Elasticsearch security features are enabled, the +transform remembers which roles the user that created it had at the time of creation and uses those same roles. If +those roles do not have the required privileges on the source and destination indices, the transform fails when it +attempts unauthorized operations. + +NOTE: You must use Kibana or this API to create a transform. Do not add a transform directly into any +`.transform-internal*` indices using the Elasticsearch index API. If Elasticsearch security features are enabled, do +not give users any privileges on `.transform-internal*` indices. If you used transforms prior to 7.5, also do not +give users any privileges on `.data-frame-internal*` indices. 
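+
+For orientation, a minimal sketch of the pivot method described above might look like the following; the index names, field names, and sync settings are illustrative placeholders, not defaults:
+
+[source,ts]
+----
+// Sketch only: a continuous pivot transform keyed by a hypothetical customer_id field.
+await client.transform.putTransform({
+  transform_id: 'customer-spend',
+  source: { index: 'orders' },
+  dest: { index: 'customer-spend' },
+  pivot: {
+    group_by: { customer_id: { terms: { field: 'customer_id' } } },
+    aggregations: { total_spend: { sum: { field: 'taxful_total_price' } } }
+  },
+  // sync.time makes the transform run continuously, based on an ingest timestamp field.
+  sync: { time: { field: 'order_date', delay: '60s' } },
+  frequency: '5m'
+})
+----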
+ +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-transform-put-transform[Endpoint documentation] +[source,ts] +---- +client.transform.putTransform({ transform_id, dest, source }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`transform_id` (string)*: Identifier for the transform. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), +hyphens, and underscores. It has a 64 character limit and must start and end with alphanumeric characters. +** *`dest` ({ index, op_type, pipeline, routing, version_type })*: The destination for the transform. +** *`source` ({ index, query, remote, size, slice, sort, _source, runtime_mappings })*: The source of the data for the transform. +** *`description` (Optional, string)*: Free text description of the transform. +** *`frequency` (Optional, string | -1 | 0)*: The interval between checks for changes in the source indices when the transform is running continuously. Also +determines the retry interval in the event of transient failures while the transform is searching or indexing. +The minimum value is `1s` and the maximum is `1h`. +** *`latest` (Optional, { sort, unique_key })*: The latest method transforms the data by finding the latest document for each unique key. +** *`_meta` (Optional, Record)*: Defines optional transform metadata. +** *`pivot` (Optional, { aggregations, group_by })*: The pivot method transforms the data by aggregating and grouping it. These objects define the group by fields +and the aggregation to reduce the data. +** *`retention_policy` (Optional, { time })*: Defines a retention policy for the transform. Data that meets the defined criteria is deleted from the +destination index. +** *`settings` (Optional, { align_checkpoints, dates_as_epoch_millis, deduce_mappings, docs_per_second, max_page_search_size, unattended })*: Defines optional transform settings. +** *`sync` (Optional, { time })*: Defines the properties transforms require to run continuously. +** *`defer_validation` (Optional, boolean)*: When the transform is created, a series of validations occur to ensure its success. For example, there is a +check for the existence of the source indices and a check that the destination index is not part of the source +index pattern. You can use this parameter to skip the checks, for example when the source index does not exist +until after the transform is created. The validations are always run when you start the transform, however, with +the exception of privilege checks. +** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. + +[discrete] +==== reset_transform +Reset a transform. + +Before you can reset it, you must stop it; alternatively, use the `force` query parameter. +If the destination index was created by the transform, it is deleted. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-transform-reset-transform[Endpoint documentation] +[source,ts] +---- +client.transform.resetTransform({ transform_id }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`transform_id` (string)*: Identifier for the transform. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), +hyphens, and underscores. It has a 64 character limit and must start and end with alphanumeric characters. +** *`force` (Optional, boolean)*: If this value is `true`, the transform is reset regardless of its current state. 
If it's `false`, the transform +must be stopped before it can be reset. +** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. + +[discrete] +==== schedule_now_transform +Schedule a transform to start now. + +Instantly run a transform to process data. +If you run this API, the transform will process the new data instantly, +without waiting for the configured frequency interval. After the API is called, +the transform will be processed again at `now + frequency` unless the API +is called again in the meantime. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-transform-schedule-now-transform[Endpoint documentation] +[source,ts] +---- +client.transform.scheduleNowTransform({ transform_id }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`transform_id` (string)*: Identifier for the transform. +** *`timeout` (Optional, string | -1 | 0)*: Controls the time to wait for the scheduling to take place + +[discrete] +==== start_transform +Start a transform. + +When you start a transform, it creates the destination index if it does not already exist. The `number_of_shards` is +set to `1` and the `auto_expand_replicas` is set to `0-1`. If it is a pivot transform, it deduces the mapping +definitions for the destination index from the source indices and the transform aggregations. If fields in the +destination index are derived from scripts (as in the case of `scripted_metric` or `bucket_script` aggregations), +the transform uses dynamic mappings unless an index template exists. If it is a latest transform, it does not deduce +mapping definitions; it uses dynamic mappings. To use explicit mappings, create the destination index before you +start the transform. Alternatively, you can create an index template, though it does not affect the deduced mappings +in a pivot transform. + +When the transform starts, a series of validations occur to ensure its success. If you deferred validation when you +created the transform, they occur when you start the transform—​with the exception of privilege checks. When +Elasticsearch security features are enabled, the transform remembers which roles the user that created it had at the +time of creation and uses those same roles. If those roles do not have the required privileges on the source and +destination indices, the transform fails when it attempts unauthorized operations. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-transform-start-transform[Endpoint documentation] +[source,ts] +---- +client.transform.startTransform({ transform_id }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`transform_id` (string)*: Identifier for the transform. +** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. +** *`from` (Optional, string)*: Restricts the set of transformed entities to those changed after this time. Relative times like now-30d are supported. Only applicable for continuous transforms. + +[discrete] +==== stop_transform +Stop transforms. +Stops one or more transforms. 
+ +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-transform-stop-transform[Endpoint documentation] +[source,ts] +---- +client.transform.stopTransform({ transform_id }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`transform_id` (string)*: Identifier for the transform. To stop multiple transforms, use a list or a wildcard expression. +To stop all transforms, use `_all` or `*` as the identifier. +** *`allow_no_match` (Optional, boolean)*: Specifies what to do when the request: contains wildcard expressions and there are no transforms that match; +contains the `_all` string or no identifiers and there are no matches; contains wildcard expressions and there +are only partial matches. + +If it is true, the API returns a successful acknowledgement message when there are no matches. When there are +only partial matches, the API stops the appropriate transforms. + +If it is false, the request returns a 404 status code when there are no matches or only partial matches. +** *`force` (Optional, boolean)*: If it is true, the API forcefully stops the transforms. +** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response when `wait_for_completion` is `true`. If no response is received before the +timeout expires, the request returns a timeout exception. However, the request continues processing and +eventually moves the transform to a STOPPED state. +** *`wait_for_checkpoint` (Optional, boolean)*: If it is true, the transform does not completely stop until the current checkpoint is completed. If it is false, +the transform stops as soon as possible. +** *`wait_for_completion` (Optional, boolean)*: If it is true, the API blocks until the indexer state completely stops. If it is false, the API returns +immediately and the indexer is stopped asynchronously in the background. + +[discrete] +==== update_transform +Update a transform. +Updates certain properties of a transform. + +All updated properties except `description` do not take effect until after the transform starts the next checkpoint, +thus there is data consistency in each checkpoint. To use this API, you must have `read` and `view_index_metadata` +privileges for the source indices. You must also have `index` and `read` privileges for the destination index. When +Elasticsearch security features are enabled, the transform remembers which roles the user who updated it had at the +time of update and runs with those privileges. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-transform-update-transform[Endpoint documentation] +[source,ts] +---- +client.transform.updateTransform({ transform_id }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`transform_id` (string)*: Identifier for the transform. +** *`dest` (Optional, { index, op_type, pipeline, routing, version_type })*: The destination for the transform. +** *`description` (Optional, string)*: Free text description of the transform. +** *`frequency` (Optional, string | -1 | 0)*: The interval between checks for changes in the source indices when the +transform is running continuously. Also determines the retry interval in +the event of transient failures while the transform is searching or +indexing. The minimum value is 1s and the maximum is 1h. +** *`_meta` (Optional, Record)*: Defines optional transform metadata. +** *`source` (Optional, { index, query, remote, size, slice, sort, _source, runtime_mappings })*: The source of the data for the transform. 
+** *`settings` (Optional, { align_checkpoints, dates_as_epoch_millis, deduce_mappings, docs_per_second, max_page_search_size, unattended })*: Defines optional transform settings. +** *`sync` (Optional, { time })*: Defines the properties transforms require to run continuously. +** *`retention_policy` (Optional, { time } | null)*: Defines a retention policy for the transform. Data that meets the defined +criteria is deleted from the destination index. +** *`defer_validation` (Optional, boolean)*: When true, deferrable validations are not run. This behavior may be +desired if the source index does not exist until after the transform is +created. +** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. If no response is received before the +timeout expires, the request fails and returns an error. + +[discrete] +==== upgrade_transforms +Upgrade all transforms. + +Transforms are compatible across minor versions and between supported major versions. +However, over time, the format of transform configuration information may change. +This API identifies transforms that have a legacy configuration format and upgrades them to the latest version. +It also cleans up the internal data structures that store the transform state and checkpoints. +The upgrade does not affect the source and destination indices. +The upgrade also does not affect the roles that transforms use when Elasticsearch security features are enabled; the role used to read source data and write to the destination index remains unchanged. + +If a transform upgrade step fails, the upgrade stops and an error is returned about the underlying issue. +Resolve the issue and then re-run the process. +A summary is returned when the upgrade is finished. + +To ensure continuous transforms remain running during a major version upgrade of the cluster – for example, from 7.16 to 8.0 – it is recommended to upgrade transforms before upgrading the cluster. +You may want to perform a recent cluster backup prior to the upgrade. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-transform-upgrade-transforms[Endpoint documentation] +[source,ts] +---- +client.transform.upgradeTransforms({ ... }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`dry_run` (Optional, boolean)*: When true, the request checks for updates but does not run them. +** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. If no response is received before the timeout expires, the request fails and +returns an error. + +[discrete] +=== watcher +[discrete] +==== ack_watch +Acknowledge a watch. +Acknowledging a watch enables you to manually throttle the execution of the watch's actions. + +The acknowledgement state of an action is stored in the `status.actions.<action-id>.ack.state` structure. + +IMPORTANT: If the specified watch is currently being executed, this API will return an error. +The reason for this behavior is to prevent overwriting the watch status from a watch execution. + +Acknowledging an action throttles further executions of that action until its `ack.state` is reset to `awaits_successful_execution`. +This happens when the condition of the watch is not met (the condition evaluates to false). + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-ack-watch[Endpoint documentation] +[source,ts] +---- +client.watcher.ackWatch({ watch_id }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`watch_id` (string)*: The watch identifier.
+** *`action_id` (Optional, string | string[])*: A list of the action identifiers to acknowledge. +If you omit this parameter, all of the actions of the watch are acknowledged. + +[discrete] +==== activate_watch +Activate a watch. +A watch can be either active or inactive. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-activate-watch[Endpoint documentation] +[source,ts] +---- +client.watcher.activateWatch({ watch_id }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`watch_id` (string)*: The watch identifier. + +[discrete] +==== deactivate_watch +Deactivate a watch. +A watch can be either active or inactive. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-deactivate-watch[Endpoint documentation] +[source,ts] +---- +client.watcher.deactivateWatch({ watch_id }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`watch_id` (string)*: The watch identifier. + +[discrete] +==== delete_watch +Delete a watch. +When the watch is removed, the document representing the watch in the `.watches` index is gone and it will never be run again. + +Deleting a watch does not delete any watch execution records related to this watch from the watch history. + +IMPORTANT: Deleting a watch must be done by using only this API. +Do not delete the watch directly from the `.watches` index using the Elasticsearch delete document API. +When Elasticsearch security features are enabled, make sure no write privileges are granted to anyone for the `.watches` index. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-delete-watch[Endpoint documentation] +[source,ts] +---- +client.watcher.deleteWatch({ id }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`id` (string)*: The watch identifier. + +[discrete] +==== execute_watch +Run a watch. +This API can be used to force execution of the watch outside of its triggering logic or to simulate the watch execution for debugging purposes. + +For testing and debugging purposes, you also have fine-grained control on how the watch runs. +You can run the watch without running all of its actions or alternatively by simulating them. +You can also force execution by ignoring the watch condition and control whether a watch record would be written to the watch history after it runs. + +You can use the run watch API to run watches that are not yet registered by specifying the watch definition inline. +This serves as a great tool for testing and debugging your watches prior to adding them to Watcher. + +When Elasticsearch security features are enabled on your cluster, watches are run with the privileges of the user that stored the watches. +If your user is allowed to read index `a`, but not index `b`, then the exact same set of rules will apply during execution of a watch. + +When using the run watch API, the authorization data of the user that called the API will be used as a base, instead of the authorization data of the user who stored the watch. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-execute-watch[Endpoint documentation] +[source,ts] +---- +client.watcher.executeWatch({ ... }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`id` (Optional, string)*: The watch identifier. +** *`action_modes` (Optional, Record)*: Determines how to handle the watch actions as part of the watch execution.
+** *`alternative_input` (Optional, Record)*: When present, the watch uses this object as a payload instead of executing its own input. +** *`ignore_condition` (Optional, boolean)*: When set to `true`, the watch execution uses the always condition. This can also be specified as an HTTP parameter. +** *`record_execution` (Optional, boolean)*: When set to `true`, the watch record representing the watch execution result is persisted to the `.watcher-history` index for the current time. +In addition, the status of the watch is updated, possibly throttling subsequent runs. +This can also be specified as an HTTP parameter. +** *`simulated_actions` (Optional, { actions, all, use_all })* +** *`trigger_data` (Optional, { scheduled_time, triggered_time })*: This structure is parsed as the data of the trigger event that will be used during the watch execution. +** *`watch` (Optional, { actions, condition, input, metadata, status, throttle_period, throttle_period_in_millis, transform, trigger })*: When present, this watch is used instead of the one specified in the request. +This watch is not persisted to the index and `record_execution` cannot be set. +** *`debug` (Optional, boolean)*: Defines whether the watch runs in debug mode. + +[discrete] +==== get_settings +Get Watcher index settings. +Get settings for the Watcher internal index (`.watches`). +Only a subset of settings are shown, for example `index.auto_expand_replicas` and `index.number_of_replicas`. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-get-settings[Endpoint documentation] +[source,ts] +---- +client.watcher.getSettings({ ... }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`master_timeout` (Optional, string | -1 | 0)*: The period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. + +[discrete] +==== get_watch +Get a watch. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-get-watch[Endpoint documentation] +[source,ts] +---- +client.watcher.getWatch({ id }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`id` (string)*: The watch identifier. + +[discrete] +==== put_watch +Create or update a watch. +When a watch is registered, a new document that represents the watch is added to the `.watches` index and its trigger is immediately registered with the relevant trigger engine. +Typically for the `schedule` trigger, the scheduler is the trigger engine. + +IMPORTANT: You must use Kibana or this API to create a watch. +Do not add a watch directly to the `.watches` index by using the Elasticsearch index API. +If Elasticsearch security features are enabled, do not give users write privileges on the `.watches` index. + +When you add a watch you can also define its initial active state by setting the *active* parameter. + +When Elasticsearch security features are enabled, your watch can index or search only on indices for which the user that stored the watch has privileges. +If the user is able to read index `a`, but not index `b`, the same will apply when the watch runs. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-put-watch[Endpoint documentation] +[source,ts] +---- +client.watcher.putWatch({ id }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`id` (string)*: The identifier for the watch. +** *`actions` (Optional, Record)*: The list of actions that will be run if the condition matches. 
+** *`condition` (Optional, { always, array_compare, compare, never, script })*: The condition that defines if the actions should be run. +** *`input` (Optional, { chain, http, search, simple })*: The input that defines the input that loads the data for the watch. +** *`metadata` (Optional, Record)*: Metadata JSON that will be copied into the history entries. +** *`throttle_period` (Optional, string | -1 | 0)*: The minimum time between actions being run. +The default is 5 seconds. +This default can be changed in the config file with the setting `xpack.watcher.throttle.period.default_period`. +If both this value and the `throttle_period_in_millis` parameter are specified, Watcher uses the last parameter included in the request. +** *`throttle_period_in_millis` (Optional, Unit)*: Minimum time in milliseconds between actions being run. Defaults to 5000. If both this value and the throttle_period parameter are specified, Watcher uses the last parameter included in the request. +** *`transform` (Optional, { chain, script, search })*: The transform that processes the watch payload to prepare it for the watch actions. +** *`trigger` (Optional, { schedule })*: The trigger that defines when the watch should run. +** *`active` (Optional, boolean)*: The initial state of the watch. +The default value is `true`, which means the watch is active by default. +** *`if_primary_term` (Optional, number)*: only update the watch if the last operation that has changed the watch has the specified primary term +** *`if_seq_no` (Optional, number)*: only update the watch if the last operation that has changed the watch has the specified sequence number +** *`version` (Optional, number)*: Explicit version number for concurrency control + +[discrete] +==== query_watches +Query watches. +Get all registered watches in a paginated manner and optionally filter watches by a query. + +Note that only the `_id` and `metadata.*` fields are queryable or sortable. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-query-watches[Endpoint documentation] +[source,ts] +---- +client.watcher.queryWatches({ ... }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`from` (Optional, number)*: The offset from the first result to fetch. +It must be non-negative. +** *`size` (Optional, number)*: The number of hits to return. +It must be non-negative. +** *`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })*: A query that filters the watches to be returned. +** *`sort` (Optional, string | { _score, _doc, _geo_distance, _script } | string | { _score, _doc, _geo_distance, _script }[])*: One or more fields used to sort the search results. +** *`search_after` (Optional, number | number | string | boolean | null[])*: Retrieve the next page of hits using a set of sort values from the previous page. 
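+
+For example, a minimal sketch of paging through watches with this API might look like the following; the `metadata.owner` value and the page size are illustrative, not defaults (only the `_id` and `metadata.*` fields are queryable or sortable):
+
+[source,ts]
+----
+// Sketch only: fetch the first ten watches whose metadata marks a hypothetical owner.
+const response = await client.watcher.queryWatches({
+  query: { term: { 'metadata.owner': 'ops-team' } },
+  sort: ['_id'],
+  from: 0,
+  size: 10
+})
+
+// The response contains the total count and the matching watches.
+console.log(response.count)
+for (const watch of response.watches) {
+  console.log(watch._id)
+}
+----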
+ +[discrete] +==== start +Start the watch service. +Start the Watcher service if it is not already running. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-start[Endpoint documentation] +[source,ts] +---- +client.watcher.start({ ... }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. + +[discrete] +==== stats +Get Watcher statistics. +This API always returns basic metrics. +You retrieve more metrics by using the metric parameter. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-stats[Endpoint documentation] +[source,ts] +---- +client.watcher.stats({ ... }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`metric` (Optional, Enum("_all" | "queued_watches" | "current_watches" | "pending_watches") | Enum("_all" | "queued_watches" | "current_watches" | "pending_watches")[])*: Defines which additional metrics are included in the response. +** *`emit_stacktraces` (Optional, boolean)*: Defines whether stack traces are generated for each watch that is running. + +[discrete] +==== stop +Stop the watch service. +Stop the Watcher service if it is running. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-stop[Endpoint documentation] +[source,ts] +---- +client.watcher.stop({ ... }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`master_timeout` (Optional, string | -1 | 0)*: The period to wait for the master node. +If the master node is not available before the timeout expires, the request fails and returns an error. +To indicate that the request should never timeout, set it to `-1`. + +[discrete] +==== update_settings +Update Watcher index settings. +Update settings for the Watcher internal index (`.watches`). +Only a subset of settings can be modified. +This includes `index.auto_expand_replicas` and `index.number_of_replicas`. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-update-settings[Endpoint documentation] +[source,ts] +---- +client.watcher.updateSettings({ ... }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`index.auto_expand_replicas` (Optional, string)* +** *`index.number_of_replicas` (Optional, number)* +** *`master_timeout` (Optional, string | -1 | 0)*: The period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. +** *`timeout` (Optional, string | -1 | 0)*: The period to wait for a response. +If no response is received before the timeout expires, the request fails and returns an error. + +[discrete] +=== xpack +[discrete] +==== info +Get information. +The information provided by the API includes: + +* Build information including the build number and timestamp. +* License information about the currently installed license. +* Feature information for the features that are currently enabled and available under the current license. + +https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-info[Endpoint documentation] +[source,ts] +---- +client.xpack.info({ ... }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`categories` (Optional, Enum("build" | "features" | "license")[])*: A list of the information categories to include in the response. +For example, `build,license,features`. 
+** *`accept_enterprise` (Optional, boolean)*: If this param is used it must be set to true +** *`human` (Optional, boolean)*: Defines whether additional human-readable information is included in the response. +In particular, it adds descriptions and a tag line. + +[discrete] +==== usage +Get usage information. +Get information about the features that are currently enabled and available under the current license. +The API also provides some usage statistics. + +https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-xpack[Endpoint documentation] +[source,ts] +---- +client.xpack.usage({ ... }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`master_timeout` (Optional, string | -1 | 0)*: The period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. +To indicate that the request should never timeout, set it to `-1`. + diff --git a/docs/reference/api-reference.md b/docs/reference/api-reference.md index e216c1981..627c80ce9 100644 --- a/docs/reference/api-reference.md +++ b/docs/reference/api-reference.md @@ -1,14 +1,18 @@ --- mapped_pages: - https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/api-reference.html +comment: | + IMPORTANT: This file is autogenerated, DO NOT send pull requests that change this file directly. + You should update the script that does the generation, which can be found in: + https://github.com/elastic/elastic-client-generator-js --- # API Reference [api-reference] - -## bulk [_bulk] - -Bulk index or delete documents. Perform multiple `index`, `create`, `delete`, and `update` actions in a single request. This reduces overhead and can greatly increase indexing speed. +## client.bulk [_bulk] +Bulk index or delete documents. +Perform multiple `index`, `create`, `delete`, and `update` actions in a single request. +This reduces overhead and can greatly increase indexing speed. If the Elasticsearch security features are enabled, you must have the following index privileges for the target data stream, index, or index alias: @@ -33,29 +37,34 @@ action_and_meta_data\n optional_source\n ``` -The `index` and `create` actions expect a source on the next line and have the same semantics as the `op_type` parameter in the standard index API. A `create` action fails if a document with the same ID already exists in the target An `index` action adds or replaces a document as necessary. - -::::{note} -Data streams support only the `create` action. To update or delete a document in a data stream, you must target the backing index containing the document. -:::: +The `index` and `create` actions expect a source on the next line and have the same semantics as the `op_type` parameter in the standard index API. +A `create` action fails if a document with the same ID already exists in the target +An `index` action adds or replaces a document as necessary. +NOTE: Data streams support only the `create` action. +To update or delete a document in a data stream, you must target the backing index containing the document. An `update` action expects that the partial doc, upsert, and script and its options are specified on the next line. A `delete` action does not expect a source on the next line and has the same semantics as the standard delete API. -::::{note} -The final line of data must end with a newline character (`\n`). Each newline character may be preceded by a carriage return (`\r`). 
When sending NDJSON data to the `_bulk` endpoint, use a `Content-Type` header of `application/json` or `application/x-ndjson`. Because this format uses literal newline characters (`\n`) as delimiters, make sure that the JSON actions and sources are not pretty printed. -:::: - +NOTE: The final line of data must end with a newline character (`\n`). +Each newline character may be preceded by a carriage return (`\r`). +When sending NDJSON data to the `_bulk` endpoint, use a `Content-Type` header of `application/json` or `application/x-ndjson`. +Because this format uses literal newline characters (`\n`) as delimiters, make sure that the JSON actions and sources are not pretty printed. -If you provide a target in the request path, it is used for any actions that don’t explicitly specify an `_index` argument. +If you provide a target in the request path, it is used for any actions that don't explicitly specify an `_index` argument. -A note on the format: the idea here is to make processing as fast as possible. As some of the actions are redirected to other shards on other nodes, only `action_meta_data` is parsed on the receiving node side. +A note on the format: the idea here is to make processing as fast as possible. +As some of the actions are redirected to other shards on other nodes, only `action_meta_data` is parsed on the receiving node side. Client libraries using this protocol should try and strive to do something similar on the client side, and reduce buffering as much as possible. -There is no "correct" number of actions to perform in a single bulk request. Experiment with different settings to find the optimal size for your particular workload. Note that Elasticsearch limits the maximum size of a HTTP request to 100mb by default so clients must ensure that no request exceeds this size. It is not possible to index a single document that exceeds the size limit, so you must pre-process any such documents into smaller pieces before sending them to Elasticsearch. For instance, split documents into pages or chapters before indexing them, or store raw binary data in a system outside Elasticsearch and replace the raw data with a link to the external system in the documents that you send to Elasticsearch. +There is no "correct" number of actions to perform in a single bulk request. +Experiment with different settings to find the optimal size for your particular workload. +Note that Elasticsearch limits the maximum size of a HTTP request to 100mb by default so clients must ensure that no request exceeds this size. +It is not possible to index a single document that exceeds the size limit, so you must pre-process any such documents into smaller pieces before sending them to Elasticsearch. +For instance, split documents into pages or chapters before indexing them, or store raw binary data in a system outside Elasticsearch and replace the raw data with a link to the external system in the documents that you send to Elasticsearch. **Client suppport for bulk requests** @@ -70,7 +79,8 @@ Some of the officially supported clients provide helpers to assist with bulk req **Submitting bulk requests with cURL** -If you’re providing text file input to `curl`, you must use the `--data-binary` flag instead of plain `-d`. The latter doesn’t preserve newlines. For example: +If you're providing text file input to `curl`, you must use the `--data-binary` flag instead of plain `-d`. +The latter doesn't preserve newlines. 
For example:
```
$ cat requests
@@ -82,20 +92,21 @@ $ curl -s -H "Content-Type: application/x-ndjson" -XPOST localhost:9200/_bulk --
**Optimistic concurrency control**
-Each `index` and `delete` action within a bulk API call may include the `if_seq_no` and `if_primary_term` parameters in their respective action and meta data lines. The `if_seq_no` and `if_primary_term` parameters control how operations are run, based on the last modification to existing documents. See Optimistic concurrency control for more details.
+Each `index` and `delete` action within a bulk API call may include the `if_seq_no` and `if_primary_term` parameters in their respective action and meta data lines.
+The `if_seq_no` and `if_primary_term` parameters control how operations are run, based on the last modification to existing documents. See Optimistic concurrency control for more details.
**Versioning**
-Each bulk item can include the version value using the `version` field. It automatically follows the behavior of the index or delete operation based on the `_version` mapping. It also support the `version_type`.
+Each bulk item can include the version value using the `version` field.
+It automatically follows the behavior of the index or delete operation based on the `_version` mapping.
+It also supports the `version_type`.
**Routing**
-Each bulk item can include the routing value using the `routing` field. It automatically follows the behavior of the index or delete operation based on the `_routing` mapping.
-
-::::{note}
-Data streams do not support custom routing unless they were created with the `allow_custom_routing` setting enabled in the template.
-::::
+Each bulk item can include the routing value using the `routing` field.
+It automatically follows the behavior of the index or delete operation based on the `_routing` mapping.
+NOTE: Data streams do not support custom routing unless they were created with the `allow_custom_routing` setting enabled in the template.
**Wait for active shards**
@@ -105,121 +116,114 @@ When making bulk calls, you can set the `wait_for_active_shards` parameter to re
Control when the changes made by this request are visible to search.
-::::{note}
-Only the shards that receive the bulk request will be affected by refresh. Imagine a `_bulk?refresh=wait_for` request with three documents in it that happen to be routed to different shards in an index with five shards. The request will only wait for those three shards to refresh. The other two shards that make up the index do not participate in the `_bulk` request at all.
-::::
-
+NOTE: Only the shards that receive the bulk request will be affected by refresh.
+Imagine a `_bulk?refresh=wait_for` request with three documents in it that happen to be routed to different shards in an index with five shards.
+The request will only wait for those three shards to refresh.
+The other two shards that make up the index do not participate in the `_bulk` request at all.
-[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-bulk)
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-bulk)
```ts
client.bulk({ ... })
```
+### Arguments [_arguments_bulk]
+#### Request (object) [_request_bulk]
-### Arguments [_arguments]
-
-* **Request (object):**
-
- * **`index` (Optional, string)**: The name of the data stream, index, or index alias to perform bulk actions on.
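+For illustration only, a minimal `client.bulk` call that follows the request format described above might look like the sketch below (the index name and documents are placeholders, not part of the API):
+
+```ts
+// Hypothetical example: index two documents in a single bulk request.
+// Each action object is followed by its source document, mirroring the NDJSON format.
+const response = await client.bulk({
+  refresh: 'wait_for',
+  operations: [
+    { index: { _index: 'my-index', _id: '1' } },
+    { title: 'Document one' },
+    { index: { _index: 'my-index', _id: '2' } },
+    { title: 'Document two' }
+  ]
+})
+console.log(response.errors) // false when every action succeeded
+```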
- * **`operations` (Optional, { index, create, update, delete } | { detect_noop, doc, doc_as_upsert, script, scripted_upsert, _source, upsert } | object[])** - * **`list_executed_pipelines` (Optional, boolean)**: If `true`, the response will include the ingest pipelines that were run for each index or create. - * **`pipeline` (Optional, string)**: The pipeline identifier to use to preprocess incoming documents. If the index has a default ingest pipeline specified, setting the value to `_none` turns off the default ingest pipeline for this request. If a final pipeline is configured, it will always run regardless of the value of this parameter. - * **`refresh` (Optional, Enum(true | false | "wait_for"))**: If `true`, Elasticsearch refreshes the affected shards to make this operation visible to search. If `wait_for`, wait for a refresh to make this operation visible to search. If `false`, do nothing with refreshes. Valid values: `true`, `false`, `wait_for`. - * **`routing` (Optional, string)**: A custom value that is used to route operations to a specific shard. - * **`_source` (Optional, boolean | string | string[])**: Indicates whether to return the `_source` field (`true` or `false`) or contains a list of fields to return. - * **`_source_excludes` (Optional, string | string[])**: A list of source fields to exclude from the response. You can also use this parameter to exclude fields from the subset specified in `_source_includes` query parameter. If the `_source` parameter is `false`, this parameter is ignored. - * **`_source_includes` (Optional, string | string[])**: A list of source fields to include in the response. If this parameter is specified, only these source fields are returned. You can exclude fields from this subset using the `_source_excludes` query parameter. If the `_source` parameter is `false`, this parameter is ignored. - * **`timeout` (Optional, string | -1 | 0)**: The period each action waits for the following operations: automatic index creation, dynamic mapping updates, and waiting for active shards. The default is `1m` (one minute), which guarantees Elasticsearch waits for at least the timeout before failing. The actual wait time could be longer, particularly when multiple waits occur. - * **`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))**: The number of shard copies that must be active before proceeding with the operation. Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). The default is `1`, which waits for each primary shard to be active. - * **`require_alias` (Optional, boolean)**: If `true`, the request’s actions must target an index alias. - * **`require_data_stream` (Optional, boolean)**: If `true`, the request’s actions must target a data stream (existing or to be created). +- **`index` (Optional, string)**: The name of the data stream, index, or index alias to perform bulk actions on. +- **`operations` (Optional, { index, create, update, delete } | { detect_noop, doc, doc_as_upsert, script, scripted_upsert, _source, upsert } | object[])** +- **`include_source_on_error` (Optional, boolean)**: True or false if to include the document source in the error message in case of parsing errors. +- **`list_executed_pipelines` (Optional, boolean)**: If `true`, the response will include the ingest pipelines that were run for each index or create. +- **`pipeline` (Optional, string)**: The pipeline identifier to use to preprocess incoming documents. 
If the index has a default ingest pipeline specified, setting the value to `_none` turns off the default ingest pipeline for this request. If a final pipeline is configured, it will always run regardless of the value of this parameter. +- **`refresh` (Optional, Enum(true | false | "wait_for"))**: If `true`, Elasticsearch refreshes the affected shards to make this operation visible to search. If `wait_for`, wait for a refresh to make this operation visible to search. If `false`, do nothing with refreshes. Valid values: `true`, `false`, `wait_for`. +- **`routing` (Optional, string)**: A custom value that is used to route operations to a specific shard. +- **`_source` (Optional, boolean | string | string[])**: Indicates whether to return the `_source` field (`true` or `false`) or contains a list of fields to return. +- **`_source_excludes` (Optional, string | string[])**: A list of source fields to exclude from the response. You can also use this parameter to exclude fields from the subset specified in `_source_includes` query parameter. If the `_source` parameter is `false`, this parameter is ignored. +- **`_source_includes` (Optional, string | string[])**: A list of source fields to include in the response. If this parameter is specified, only these source fields are returned. You can exclude fields from this subset using the `_source_excludes` query parameter. If the `_source` parameter is `false`, this parameter is ignored. +- **`timeout` (Optional, string | -1 | 0)**: The period each action waits for the following operations: automatic index creation, dynamic mapping updates, and waiting for active shards. The default is `1m` (one minute), which guarantees Elasticsearch waits for at least the timeout before failing. The actual wait time could be longer, particularly when multiple waits occur. +- **`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))**: The number of shard copies that must be active before proceeding with the operation. Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). The default is `1`, which waits for each primary shard to be active. +- **`require_alias` (Optional, boolean)**: If `true`, the request's actions must target an index alias. +- **`require_data_stream` (Optional, boolean)**: If `true`, the request's actions must target a data stream (existing or to be created). +## client.clearScroll [_clear_scroll] +Clear a scrolling search. +Clear the search context and results for a scrolling search. - -## clear_scroll [_clear_scroll] - -Clear a scrolling search. Clear the search context and results for a scrolling search. - -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-clear-scroll) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-clear-scroll) ```ts client.clearScroll({ ... }) ``` +### Arguments [_arguments_clear_scroll] +#### Request (object) [_request_clear_scroll] -### Arguments [_arguments_2] - -* **Request (object):** - - * **`scroll_id` (Optional, string | string[])**: A list of scroll IDs to clear. To clear all scroll IDs, use `_all`. IMPORTANT: Scroll IDs can be long. It is recommended to specify scroll IDs in the request body parameter. - - - -## close_point_in_time [_close_point_in_time] +- **`scroll_id` (Optional, string | string[])**: A list of scroll IDs to clear. To clear all scroll IDs, use `_all`. IMPORTANT: Scroll IDs can be long. 
It is recommended to specify scroll IDs in the request body parameter. -Close a point in time. A point in time must be opened explicitly before being used in search requests. The `keep_alive` parameter tells Elasticsearch how long it should persist. A point in time is automatically closed when the `keep_alive` period has elapsed. However, keeping points in time has a cost; close them as soon as they are no longer required for search requests. +## client.closePointInTime [_close_point_in_time] +Close a point in time. +A point in time must be opened explicitly before being used in search requests. +The `keep_alive` parameter tells Elasticsearch how long it should persist. +A point in time is automatically closed when the `keep_alive` period has elapsed. +However, keeping points in time has a cost; close them as soon as they are no longer required for search requests. -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-open-point-in-time) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-open-point-in-time) ```ts client.closePointInTime({ id }) ``` +### Arguments [_arguments_close_point_in_time] +#### Request (object) [_request_close_point_in_time] -### Arguments [_arguments_3] - -* **Request (object):** - - * **`id` (string)**: The ID of the point-in-time. - +- **`id` (string)**: The ID of the point-in-time. +## client.count [_count] +Count search results. +Get the number of documents matching a query. -## count [_count] - -Count search results. Get the number of documents matching a query. - -The query can either be provided using a simple query string as a parameter or using the Query DSL defined within the request body. The latter must be nested in a `query` key, which is the same as the search API. +The query can be provided either by using a simple query string as a parameter, or by defining Query DSL within the request body. +The query is optional. When no query is provided, the API uses `match_all` to count all the documents. The count API supports multi-target syntax. You can run a single count API search across multiple data streams and indices. -The operation is broadcast across all shards. For each shard ID group, a replica is chosen and the search is run against it. This means that replicas increase the scalability of the count. +The operation is broadcast across all shards. +For each shard ID group, a replica is chosen and the search is run against it. +This means that replicas increase the scalability of the count. -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-count) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-count) ```ts client.count({ ... }) ``` - - -### Arguments [_arguments_4] - -* **Request (object):** - - * **`index` (Optional, string | string[])**: A list of data streams, indices, and aliases to search. It supports wildcards (`*`). To search all data streams and indices, omit this parameter or use `*` or `_all`. 
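+As a rough sketch (the index name and query are illustrative only), a `client.count` call with a Query DSL body might look like this:
+
+```ts
+// Hypothetical example: count documents that match a term query.
+// Omitting `query` would count all documents, since the API defaults to `match_all`.
+const result = await client.count({
+  index: 'my-index',
+  query: { term: { 'user.id': 'kimchy' } }
+})
+console.log(result.count)
+```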
- * **`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })**: Defines the search definition using the Query DSL. The query is optional, and when not provided, it will use `match_all` to count all the docs. - * **`allow_no_indices` (Optional, boolean)**: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. - * **`analyzer` (Optional, string)**: The analyzer to use for the query string. This parameter can be used only when the `q` query string parameter is specified. - * **`analyze_wildcard` (Optional, boolean)**: If `true`, wildcard and prefix queries are analyzed. This parameter can be used only when the `q` query string parameter is specified. - * **`default_operator` (Optional, Enum("and" | "or"))**: The default operator for query string query: `AND` or `OR`. This parameter can be used only when the `q` query string parameter is specified. - * **`df` (Optional, string)**: The field to use as a default when no field prefix is given in the query string. This parameter can be used only when the `q` query string parameter is specified. - * **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: The type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. It supports a list of values, such as `open,hidden`. - * **`ignore_throttled` (Optional, boolean)**: If `true`, concrete, expanded, or aliased indices are ignored when frozen. - * **`ignore_unavailable` (Optional, boolean)**: If `false`, the request returns an error if it targets a missing or closed index. - * **`lenient` (Optional, boolean)**: If `true`, format-based query failures (such as providing text to a numeric field) in the query string will be ignored. This parameter can be used only when the `q` query string parameter is specified. - * **`min_score` (Optional, number)**: The minimum `_score` value that documents must have to be included in the result. - * **`preference` (Optional, string)**: The node or shard the operation should be performed on. By default, it is random. - * **`routing` (Optional, string)**: A custom value used to route operations to a specific shard. - * **`terminate_after` (Optional, number)**: The maximum number of documents to collect for each shard. If a query reaches this limit, Elasticsearch terminates the query early. Elasticsearch collects documents before sorting. IMPORTANT: Use with caution. Elasticsearch applies this parameter to each shard handling the request. 
When possible, let Elasticsearch perform early termination automatically. Avoid specifying this parameter for requests that target data streams with backing indices across multiple data tiers. - * **`q` (Optional, string)**: The query in Lucene query string syntax. - - - -## create [_create] - +### Arguments [_arguments_count] + +#### Request (object) [_request_count] + +- **`index` (Optional, string | string[])**: A list of data streams, indices, and aliases to search. It supports wildcards (`*`). To search all data streams and indices, omit this parameter or use `*` or `_all`. +- **`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })**: Defines the search query using Query DSL. A request body query cannot be used with the `q` query string parameter. +- **`allow_no_indices` (Optional, boolean)**: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. +- **`analyzer` (Optional, string)**: The analyzer to use for the query string. This parameter can be used only when the `q` query string parameter is specified. +- **`analyze_wildcard` (Optional, boolean)**: If `true`, wildcard and prefix queries are analyzed. This parameter can be used only when the `q` query string parameter is specified. +- **`default_operator` (Optional, Enum("and" | "or"))**: The default operator for query string query: `AND` or `OR`. This parameter can be used only when the `q` query string parameter is specified. +- **`df` (Optional, string)**: The field to use as a default when no field prefix is given in the query string. This parameter can be used only when the `q` query string parameter is specified. +- **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: The type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. It supports a list of values, such as `open,hidden`. +- **`ignore_throttled` (Optional, boolean)**: If `true`, concrete, expanded, or aliased indices are ignored when frozen. +- **`ignore_unavailable` (Optional, boolean)**: If `false`, the request returns an error if it targets a missing or closed index. +- **`lenient` (Optional, boolean)**: If `true`, format-based query failures (such as providing text to a numeric field) in the query string will be ignored. This parameter can be used only when the `q` query string parameter is specified. +- **`min_score` (Optional, number)**: The minimum `_score` value that documents must have to be included in the result. 
+- **`preference` (Optional, string)**: The node or shard the operation should be performed on. By default, it is random.
+- **`routing` (Optional, string)**: A custom value used to route operations to a specific shard.
+- **`terminate_after` (Optional, number)**: The maximum number of documents to collect for each shard. If a query reaches this limit, Elasticsearch terminates the query early. Elasticsearch collects documents before sorting. IMPORTANT: Use with caution. Elasticsearch applies this parameter to each shard handling the request. When possible, let Elasticsearch perform early termination automatically. Avoid specifying this parameter for requests that target data streams with backing indices across multiple data tiers.
+- **`q` (Optional, string)**: The query in Lucene query string syntax. This parameter cannot be used with a request body.
+
+## client.create [_create]
Create a new document in the index.
-You can index a new JSON document with the `//_doc/` or `//_create/<_id>` APIs Using `_create` guarantees that the document is indexed only if it does not already exist. It returns a 409 response when a document with a same ID already exists in the index. To update an existing document, you must use the `//_doc/` API.
+You can index a new JSON document with the `//_doc/` or `//_create/<_id>` APIs.
+Using `_create` guarantees that the document is indexed only if it does not already exist.
+It returns a 409 response when a document with the same ID already exists in the index.
+To update an existing document, you must use the `//_doc/` API.
If the Elasticsearch security features are enabled, you must have the following index privileges for the target data stream, index, or index alias:
@@ -230,91 +234,109 @@ Automatic data stream creation requires a matching index template with data stre
**Automatically create data streams and indices**
-If the request’s target doesn’t exist and matches an index template with a `data_stream` definition, the index operation automatically creates the data stream.
-
-If the target doesn’t exist and doesn’t match a data stream template, the operation automatically creates the index and applies any matching index templates.
-
-::::{note}
-Elasticsearch includes several built-in index templates. To avoid naming collisions with these templates, refer to index pattern documentation.
-::::
+If the request's target doesn't exist and matches an index template with a `data_stream` definition, the index operation automatically creates the data stream.
+If the target doesn't exist and doesn't match a data stream template, the operation automatically creates the index and applies any matching index templates.
-If no mapping exists, the index operation creates a dynamic mapping. By default, new fields and objects are automatically added to the mapping if needed.
+NOTE: Elasticsearch includes several built-in index templates. To avoid naming collisions with these templates, refer to index pattern documentation.
-Automatic index creation is controlled by the `action.auto_create_index` setting. If it is `true`, any index can be created automatically. You can modify this setting to explicitly allow or block automatic creation of indices that match specified patterns or set it to `false` to turn off automatic index creation entirely. Specify a list of patterns you want to allow or prefix each pattern with `+` or `-` to indicate whether it should be allowed or blocked. When a list is specified, the default behaviour is to disallow.
+If no mapping exists, the index operation creates a dynamic mapping. +By default, new fields and objects are automatically added to the mapping if needed. -::::{note} -The `action.auto_create_index` setting affects the automatic creation of indices only. It does not affect the creation of data streams. -:::: +Automatic index creation is controlled by the `action.auto_create_index` setting. +If it is `true`, any index can be created automatically. +You can modify this setting to explicitly allow or block automatic creation of indices that match specified patterns or set it to `false` to turn off automatic index creation entirely. +Specify a list of patterns you want to allow or prefix each pattern with `+` or `-` to indicate whether it should be allowed or blocked. +When a list is specified, the default behaviour is to disallow. +NOTE: The `action.auto_create_index` setting affects the automatic creation of indices only. +It does not affect the creation of data streams. **Routing** -By default, shard placement — or routing — is controlled by using a hash of the document’s ID value. For more explicit control, the value fed into the hash function used by the router can be directly specified on a per-operation basis using the `routing` parameter. +By default, shard placement — or routing — is controlled by using a hash of the document's ID value. +For more explicit control, the value fed into the hash function used by the router can be directly specified on a per-operation basis using the `routing` parameter. -When setting up explicit mapping, you can also use the `_routing` field to direct the index operation to extract the routing value from the document itself. This does come at the (very minimal) cost of an additional document parsing pass. If the `_routing` mapping is defined and set to be required, the index operation will fail if no routing value is provided or extracted. - -::::{note} -Data streams do not support custom routing unless they were created with the `allow_custom_routing` setting enabled in the template. -:::: +When setting up explicit mapping, you can also use the `_routing` field to direct the index operation to extract the routing value from the document itself. +This does come at the (very minimal) cost of an additional document parsing pass. +If the `_routing` mapping is defined and set to be required, the index operation will fail if no routing value is provided or extracted. +NOTE: Data streams do not support custom routing unless they were created with the `allow_custom_routing` setting enabled in the template. **Distributed** -The index operation is directed to the primary shard based on its route and performed on the actual node containing this shard. After the primary shard completes the operation, if needed, the update is distributed to applicable replicas. +The index operation is directed to the primary shard based on its route and performed on the actual node containing this shard. +After the primary shard completes the operation, if needed, the update is distributed to applicable replicas. **Active shards** -To improve the resiliency of writes to the system, indexing operations can be configured to wait for a certain number of active shard copies before proceeding with the operation. If the requisite number of active shard copies are not available, then the write operation must wait and retry, until either the requisite shard copies have started or a timeout occurs. 
By default, write operations only wait for the primary shards to be active before proceeding (that is to say `wait_for_active_shards` is `1`). This default can be overridden in the index settings dynamically by setting `index.write.wait_for_active_shards`. To alter this behavior per operation, use the `wait_for_active_shards request` parameter.
+To improve the resiliency of writes to the system, indexing operations can be configured to wait for a certain number of active shard copies before proceeding with the operation.
+If the requisite number of active shard copies are not available, then the write operation must wait and retry, until either the requisite shard copies have started or a timeout occurs.
+By default, write operations only wait for the primary shards to be active before proceeding (that is to say `wait_for_active_shards` is `1`).
+This default can be overridden in the index settings dynamically by setting `index.write.wait_for_active_shards`.
+To alter this behavior per operation, use the `wait_for_active_shards` request parameter.
-Valid values are all or any positive integer up to the total number of configured copies per shard in the index (which is `number_of_replicas`+1). Specifying a negative value or a number greater than the number of shard copies will throw an error.
+Valid values are all or any positive integer up to the total number of configured copies per shard in the index (which is `number_of_replicas`+1).
+Specifying a negative value or a number greater than the number of shard copies will throw an error.
-For example, suppose you have a cluster of three nodes, A, B, and C and you create an index index with the number of replicas set to 3 (resulting in 4 shard copies, one more copy than there are nodes). If you attempt an indexing operation, by default the operation will only ensure the primary copy of each shard is available before proceeding. This means that even if B and C went down and A hosted the primary shard copies, the indexing operation would still proceed with only one copy of the data. If `wait_for_active_shards` is set on the request to `3` (and all three nodes are up), the indexing operation will require 3 active shard copies before proceeding. This requirement should be met because there are 3 active nodes in the cluster, each one holding a copy of the shard. However, if you set `wait_for_active_shards` to `all` (or to `4`, which is the same in this situation), the indexing operation will not proceed as you do not have all 4 copies of each shard active in the index. The operation will timeout unless a new node is brought up in the cluster to host the fourth copy of the shard.
+For example, suppose you have a cluster of three nodes, A, B, and C, and you create an index named `index` with the number of replicas set to 3 (resulting in 4 shard copies, one more copy than there are nodes).
+If you attempt an indexing operation, by default the operation will only ensure the primary copy of each shard is available before proceeding.
+This means that even if B and C went down and A hosted the primary shard copies, the indexing operation would still proceed with only one copy of the data.
+If `wait_for_active_shards` is set on the request to `3` (and all three nodes are up), the indexing operation will require 3 active shard copies before proceeding.
+This requirement should be met because there are 3 active nodes in the cluster, each one holding a copy of the shard.
+However, if you set `wait_for_active_shards` to `all` (or to `4`, which is the same in this situation), the indexing operation will not proceed as you do not have all 4 copies of each shard active in the index. +The operation will timeout unless a new node is brought up in the cluster to host the fourth copy of the shard. -It is important to note that this setting greatly reduces the chances of the write operation not writing to the requisite number of shard copies, but it does not completely eliminate the possibility, because this check occurs before the write operation starts. After the write operation is underway, it is still possible for replication to fail on any number of shard copies but still succeed on the primary. The `_shards` section of the API response reveals the number of shard copies on which replication succeeded and failed. +It is important to note that this setting greatly reduces the chances of the write operation not writing to the requisite number of shard copies, but it does not completely eliminate the possibility, because this check occurs before the write operation starts. +After the write operation is underway, it is still possible for replication to fail on any number of shard copies but still succeed on the primary. +The `_shards` section of the API response reveals the number of shard copies on which replication succeeded and failed. -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-create) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-create) ```ts client.create({ id, index }) ``` - - -### Arguments [_arguments_5] - -* **Request (object):** - - * **`id` (string)**: A unique identifier for the document. To automatically generate a document ID, use the `POST //_doc/` request format. - * **`index` (string)**: The name of the data stream or index to target. If the target doesn’t exist and matches the name or wildcard (`*`) pattern of an index template with a `data_stream` definition, this request creates the data stream. If the target doesn’t exist and doesn’t match a data stream template, this request creates the index. - * **`document` (Optional, object)**: A document. - * **`pipeline` (Optional, string)**: The ID of the pipeline to use to preprocess incoming documents. If the index has a default ingest pipeline specified, setting the value to `_none` turns off the default ingest pipeline for this request. If a final pipeline is configured, it will always run regardless of the value of this parameter. - * **`refresh` (Optional, Enum(true | false | "wait_for"))**: If `true`, Elasticsearch refreshes the affected shards to make this operation visible to search. If `wait_for`, it waits for a refresh to make this operation visible to search. If `false`, it does nothing with refreshes. - * **`routing` (Optional, string)**: A custom value that is used to route operations to a specific shard. - * **`timeout` (Optional, string | -1 | 0)**: The period the request waits for the following operations: automatic index creation, dynamic mapping updates, waiting for active shards. Elasticsearch waits for at least the specified timeout period before failing. The actual wait time could be longer, particularly when multiple waits occur. This parameter is useful for situations where the primary shard assigned to perform the operation might not be available when the operation runs. 
Some reasons for this might be that the primary shard is currently recovering from a gateway or undergoing relocation. By default, the operation will wait on the primary shard to become available for at least 1 minute before failing and responding with an error. The actual wait time could be longer, particularly when multiple waits occur.
- * **`version` (Optional, number)**: The explicit version number for concurrency control. It must be a non-negative long number.
- * **`version_type` (Optional, Enum("internal" | "external" | "external_gte" | "force"))**: The version type.
- * **`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))**: The number of shard copies that must be active before proceeding with the operation. You can set it to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). The default value of `1` means it waits for each primary shard to be active.
-
-
-
-## delete [_delete]
-
+### Arguments [_arguments_create]
+
+#### Request (object) [_request_create]
+
+- **`id` (string)**: A unique identifier for the document. To automatically generate a document ID, use the `POST //_doc/` request format.
+- **`index` (string)**: The name of the data stream or index to target. If the target doesn't exist and matches the name or wildcard (`*`) pattern of an index template with a `data_stream` definition, this request creates the data stream. If the target doesn't exist and doesn't match a data stream template, this request creates the index.
+- **`document` (Optional, object)**: A document.
+- **`if_primary_term` (Optional, number)**: Only perform the operation if the document has this primary term.
+- **`if_seq_no` (Optional, number)**: Only perform the operation if the document has this sequence number.
+- **`include_source_on_error` (Optional, boolean)**: True or false if to include the document source in the error message in case of parsing errors.
+- **`op_type` (Optional, Enum("index" | "create"))**: Set to `create` to only index the document if it does not already exist (put if absent). If a document with the specified `_id` already exists, the indexing operation will fail. The behavior is the same as using the `/_create` endpoint. If a document ID is specified, this parameter defaults to `index`. Otherwise, it defaults to `create`. If the request targets a data stream, an `op_type` of `create` is required.
+- **`pipeline` (Optional, string)**: The ID of the pipeline to use to preprocess incoming documents. If the index has a default ingest pipeline specified, setting the value to `_none` turns off the default ingest pipeline for this request. If a final pipeline is configured, it will always run regardless of the value of this parameter.
+- **`refresh` (Optional, Enum(true | false | "wait_for"))**: If `true`, Elasticsearch refreshes the affected shards to make this operation visible to search. If `wait_for`, it waits for a refresh to make this operation visible to search. If `false`, it does nothing with refreshes.
+- **`require_alias` (Optional, boolean)**: If `true`, the destination must be an index alias.
+- **`require_data_stream` (Optional, boolean)**: If `true`, the request's actions must target a data stream (existing or to be created).
+- **`routing` (Optional, string)**: A custom value that is used to route operations to a specific shard.
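+For illustration, a minimal `client.create` call using the parameters above might look like the following sketch (the index name, ID, and document body are placeholders):
+
+```ts
+// Hypothetical example: create a document only if the ID does not exist yet.
+// If a document with this ID already exists, the client throws a 409 (conflict) error.
+const result = await client.create({
+  index: 'my-index',
+  id: '1',
+  document: { title: 'Hello world', published: true }
+})
+console.log(result.result) // 'created'
+```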
+- **`timeout` (Optional, string | -1 | 0)**: The period the request waits for the following operations: automatic index creation, dynamic mapping updates, waiting for active shards. Elasticsearch waits for at least the specified timeout period before failing. The actual wait time could be longer, particularly when multiple waits occur. This parameter is useful for situations where the primary shard assigned to perform the operation might not be available when the operation runs. Some reasons for this might be that the primary shard is currently recovering from a gateway or undergoing relocation. By default, the operation will wait on the primary shard to become available for at least 1 minute before failing and responding with an error. The actual wait time could be longer, particularly when multiple waits occur. +- **`version` (Optional, number)**: The explicit version number for concurrency control. It must be a non-negative long number. +- **`version_type` (Optional, Enum("internal" | "external" | "external_gte" | "force"))**: The version type. +- **`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))**: The number of shard copies that must be active before proceeding with the operation. You can set it to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). The default value of `1` means it waits for each primary shard to be active. + +## client.delete [_delete] Delete a document. Remove a JSON document from the specified index. -::::{note} -You cannot send deletion requests directly to a data stream. To delete a document in a data stream, you must target the backing index containing the document. -:::: - +NOTE: You cannot send deletion requests directly to a data stream. +To delete a document in a data stream, you must target the backing index containing the document. **Optimistic concurrency control** -Delete operations can be made conditional and only be performed if the last modification to the document was assigned the sequence number and primary term specified by the `if_seq_no` and `if_primary_term` parameters. If a mismatch is detected, the operation will result in a `VersionConflictException` and a status code of `409`. +Delete operations can be made conditional and only be performed if the last modification to the document was assigned the sequence number and primary term specified by the `if_seq_no` and `if_primary_term` parameters. +If a mismatch is detected, the operation will result in a `VersionConflictException` and a status code of `409`. **Versioning** -Each document indexed is versioned. When deleting a document, the version can be specified to make sure the relevant document you are trying to delete is actually being deleted and it has not changed in the meantime. Every write operation run on a document, deletes included, causes its version to be incremented. The version number of a deleted document remains available for a short time after deletion to allow for control of concurrent operations. The length of time for which a deleted document’s version remains available is determined by the `index.gc_deletes` index setting. +Each document indexed is versioned. +When deleting a document, the version can be specified to make sure the relevant document you are trying to delete is actually being deleted and it has not changed in the meantime. +Every write operation run on a document, deletes included, causes its version to be incremented. 
+The version number of a deleted document remains available for a short time after deletion to allow for control of concurrent operations. +The length of time for which a deleted document's version remains available is determined by the `index.gc_deletes` index setting. **Routing** @@ -328,38 +350,35 @@ For example: DELETE /my-index-000001/_doc/1?routing=shard-1 ``` -This request deletes the document with ID 1, but it is routed based on the user. The document is not deleted if the correct routing is not specified. +This request deletes the document with ID 1, but it is routed based on the user. +The document is not deleted if the correct routing is not specified. **Distributed** -The delete operation gets hashed into a specific shard ID. It then gets redirected into the primary shard within that ID group and replicated (if needed) to shard replicas within that ID group. +The delete operation gets hashed into a specific shard ID. +It then gets redirected into the primary shard within that ID group and replicated (if needed) to shard replicas within that ID group. -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-delete) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-delete) ```ts client.delete({ id, index }) ``` +### Arguments [_arguments_delete] +#### Request (object) [_request_delete] -### Arguments [_arguments_6] - -* **Request (object):** - - * **`id` (string)**: A unique identifier for the document. - * **`index` (string)**: The name of the target index. - * **`if_primary_term` (Optional, number)**: Only perform the operation if the document has this primary term. - * **`if_seq_no` (Optional, number)**: Only perform the operation if the document has this sequence number. - * **`refresh` (Optional, Enum(true | false | "wait_for"))**: If `true`, Elasticsearch refreshes the affected shards to make this operation visible to search. If `wait_for`, it waits for a refresh to make this operation visible to search. If `false`, it does nothing with refreshes. - * **`routing` (Optional, string)**: A custom value used to route operations to a specific shard. - * **`timeout` (Optional, string | -1 | 0)**: The period to wait for active shards. This parameter is useful for situations where the primary shard assigned to perform the delete operation might not be available when the delete operation runs. Some reasons for this might be that the primary shard is currently recovering from a store or undergoing relocation. By default, the delete operation will wait on the primary shard to become available for up to 1 minute before failing and responding with an error. - * **`version` (Optional, number)**: An explicit version number for concurrency control. It must match the current version of the document for the request to succeed. - * **`version_type` (Optional, Enum("internal" | "external" | "external_gte" | "force"))**: The version type. - * **`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))**: The minimum number of shard copies that must be active before proceeding with the operation. You can set it to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). The default value of `1` means it waits for each primary shard to be active. - - - -## delete_by_query [_delete_by_query] +- **`id` (string)**: A unique identifier for the document. +- **`index` (string)**: The name of the target index. 
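+As a sketch of the optimistic concurrency control described above (the values shown are placeholders), a conditional delete might look like this:
+
+```ts
+// Hypothetical example: delete a document only if its sequence number and
+// primary term still match the values we last observed.
+const result = await client.delete({
+  index: 'my-index',
+  id: '1',
+  if_seq_no: 3,
+  if_primary_term: 1
+})
+console.log(result.result) // 'deleted'; a mismatch raises a 409 version conflict error
+```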
+- **`if_primary_term` (Optional, number)**: Only perform the operation if the document has this primary term. +- **`if_seq_no` (Optional, number)**: Only perform the operation if the document has this sequence number. +- **`refresh` (Optional, Enum(true | false | "wait_for"))**: If `true`, Elasticsearch refreshes the affected shards to make this operation visible to search. If `wait_for`, it waits for a refresh to make this operation visible to search. If `false`, it does nothing with refreshes. +- **`routing` (Optional, string)**: A custom value used to route operations to a specific shard. +- **`timeout` (Optional, string | -1 | 0)**: The period to wait for active shards. This parameter is useful for situations where the primary shard assigned to perform the delete operation might not be available when the delete operation runs. Some reasons for this might be that the primary shard is currently recovering from a store or undergoing relocation. By default, the delete operation will wait on the primary shard to become available for up to 1 minute before failing and responding with an error. +- **`version` (Optional, number)**: An explicit version number for concurrency control. It must match the current version of the document for the request to succeed. +- **`version_type` (Optional, Enum("internal" | "external" | "external_gte" | "force"))**: The version type. +- **`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))**: The minimum number of shard copies that must be active before proceeding with the operation. You can set it to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). The default value of `1` means it waits for each primary shard to be active. +## client.deleteByQuery [_delete_by_query] Delete documents. Deletes documents that match the specified query. @@ -369,46 +388,59 @@ If the Elasticsearch security features are enabled, you must have the following * `read` * `delete` or `write` -You can specify the query criteria in the request URI or the request body using the same syntax as the search API. When you submit a delete by query request, Elasticsearch gets a snapshot of the data stream or index when it begins processing the request and deletes matching documents using internal versioning. If a document changes between the time that the snapshot is taken and the delete operation is processed, it results in a version conflict and the delete operation fails. - -::::{note} -Documents with a version equal to 0 cannot be deleted using delete by query because internal versioning does not support 0 as a valid version number. -:::: +You can specify the query criteria in the request URI or the request body using the same syntax as the search API. +When you submit a delete by query request, Elasticsearch gets a snapshot of the data stream or index when it begins processing the request and deletes matching documents using internal versioning. +If a document changes between the time that the snapshot is taken and the delete operation is processed, it results in a version conflict and the delete operation fails. +NOTE: Documents with a version equal to 0 cannot be deleted using delete by query because internal versioning does not support 0 as a valid version number. -While processing a delete by query request, Elasticsearch performs multiple search requests sequentially to find all of the matching documents to delete. A bulk delete request is performed for each batch of matching documents. 
If a search or bulk request is rejected, the requests are retried up to 10 times, with exponential back off. If the maximum retry limit is reached, processing halts and all failed requests are returned in the response. Any delete requests that completed successfully still stick, they are not rolled back.
+While processing a delete by query request, Elasticsearch performs multiple search requests sequentially to find all of the matching documents to delete.
+A bulk delete request is performed for each batch of matching documents.
+If a search or bulk request is rejected, the requests are retried up to 10 times, with exponential back off.
+If the maximum retry limit is reached, processing halts and all failed requests are returned in the response.
+Any delete requests that completed successfully still stick; they are not rolled back.
-You can opt to count version conflicts instead of halting and returning by setting `conflicts` to `proceed`. Note that if you opt to count version conflicts the operation could attempt to delete more documents from the source than `max_docs` until it has successfully deleted `max_docs documents`, or it has gone through every document in the source query.
+You can opt to count version conflicts instead of halting and returning by setting `conflicts` to `proceed`.
+Note that if you opt to count version conflicts, the operation could attempt to delete more documents from the source than `max_docs` until it has successfully deleted `max_docs` documents, or it has gone through every document in the source query.
**Throttling delete requests**
-To control the rate at which delete by query issues batches of delete operations, you can set `requests_per_second` to any positive decimal number. This pads each batch with a wait time to throttle the rate. Set `requests_per_second` to `-1` to disable throttling.
+To control the rate at which delete by query issues batches of delete operations, you can set `requests_per_second` to any positive decimal number.
+This pads each batch with a wait time to throttle the rate.
+Set `requests_per_second` to `-1` to disable throttling.
-Throttling uses a wait time between batches so that the internal scroll requests can be given a timeout that takes the request padding into account. The padding time is the difference between the batch size divided by the `requests_per_second` and the time spent writing. By default the batch size is `1000`, so if `requests_per_second` is set to `500`:
+Throttling uses a wait time between batches so that the internal scroll requests can be given a timeout that takes the request padding into account.
+The padding time is the difference between the batch size divided by the `requests_per_second` and the time spent writing.
+By default the batch size is `1000`, so if `requests_per_second` is set to `500`:
```
target_time = 1000 / 500 per second = 2 seconds
wait_time = target_time - write_time = 2 seconds - .5 seconds = 1.5 seconds
```
-Since the batch is issued as a single `_bulk` request, large batch sizes cause Elasticsearch to create many requests and wait before starting the next set. This is "bursty" instead of "smooth".
+Since the batch is issued as a single `_bulk` request, large batch sizes cause Elasticsearch to create many requests and wait before starting the next set.
+This is "bursty" instead of "smooth".
**Slicing**
-Delete by query supports sliced scroll to parallelize the delete process. This can improve efficiency and provide a convenient way to break the request down into smaller parts.
+Delete by query supports sliced scroll to parallelize the delete process. +This can improve efficiency and provide a convenient way to break the request down into smaller parts. -Setting `slices` to `auto` lets Elasticsearch choose the number of slices to use. This setting will use one slice per shard, up to a certain limit. If there are multiple source data streams or indices, it will choose the number of slices based on the index or backing index with the smallest number of shards. Adding slices to the delete by query operation creates sub-requests which means it has some quirks: +Setting `slices` to `auto` lets Elasticsearch choose the number of slices to use. +This setting will use one slice per shard, up to a certain limit. +If there are multiple source data streams or indices, it will choose the number of slices based on the index or backing index with the smallest number of shards. +Adding slices to the delete by query operation creates sub-requests which means it has some quirks: * You can see these requests in the tasks APIs. These sub-requests are "child" tasks of the task for the request with slices. * Fetching the status of the task for the request with slices only contains the status of completed slices. * These sub-requests are individually addressable for things like cancellation and rethrottling. * Rethrottling the request with `slices` will rethrottle the unfinished sub-request proportionally. * Canceling the request with `slices` will cancel each sub-request. -* Due to the nature of `slices` each sub-request won’t get a perfectly even portion of the documents. All documents will be addressed, but some slices may be larger than others. Expect larger slices to have a more even distribution. +* Due to the nature of `slices` each sub-request won't get a perfectly even portion of the documents. All documents will be addressed, but some slices may be larger than others. Expect larger slices to have a more even distribution. * Parameters like `requests_per_second` and `max_docs` on a request with `slices` are distributed proportionally to each sub-request. Combine that with the earlier point about distribution being uneven and you should conclude that using `max_docs` with `slices` might not result in exactly `max_docs` documents being deleted. * Each sub-request gets a slightly different snapshot of the source data stream or index though these are all taken at approximately the same time. -If you’re slicing manually or otherwise tuning automatic slicing, keep in mind that: +If you're slicing manually or otherwise tuning automatic slicing, keep in mind that: * Query performance is most efficient when the number of slices is equal to the number of shards in the index or backing index. If that number is large (for example, 500), choose a lower number as too many `slices` hurts performance. Setting `slices` higher than the number of shards generally does not improve efficiency and adds overhead. * Delete performance scales linearly across available resources with the number of slices. @@ -425,261 +457,244 @@ POST _tasks/r1A2WoRbTwKZ516z6NEs5A:36619/_cancel The task ID can be found by using the get tasks API. -Cancellation should happen quickly but might take a few seconds. The get task status API will continue to list the delete by query task until this task checks that it has been cancelled and terminates itself. +Cancellation should happen quickly but might take a few seconds. 
+The get task status API will continue to list the delete by query task until this task checks that it has been cancelled and terminates itself. -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-delete-by-query) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-delete-by-query) ```ts client.deleteByQuery({ index }) ``` - - -### Arguments [_arguments_7] - -* **Request (object):** - - * **`index` (string | string[])**: A list of data streams, indices, and aliases to search. It supports wildcards (`*`). To search all data streams or indices, omit this parameter or use `*` or `_all`. - * **`max_docs` (Optional, number)**: The maximum number of documents to delete. - * **`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })**: The documents to delete specified with Query DSL. - * **`slice` (Optional, { field, id, max })**: Slice the request manually using the provided slice ID and total number of slices. - * **`allow_no_indices` (Optional, boolean)**: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. - * **`analyzer` (Optional, string)**: Analyzer to use for the query string. This parameter can be used only when the `q` query string parameter is specified. - * **`analyze_wildcard` (Optional, boolean)**: If `true`, wildcard and prefix queries are analyzed. This parameter can be used only when the `q` query string parameter is specified. - * **`conflicts` (Optional, Enum("abort" | "proceed"))**: What to do if delete by query hits version conflicts: `abort` or `proceed`. - * **`default_operator` (Optional, Enum("and" | "or"))**: The default operator for query string query: `AND` or `OR`. This parameter can be used only when the `q` query string parameter is specified. - * **`df` (Optional, string)**: The field to use as default where no field prefix is given in the query string. This parameter can be used only when the `q` query string parameter is specified. - * **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: The type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. It supports a list of values, such as `open,hidden`. - * **`from` (Optional, number)**: Starting offset (default: 0) - * **`ignore_unavailable` (Optional, boolean)**: If `false`, the request returns an error if it targets a missing or closed index. 
- * **`lenient` (Optional, boolean)**: If `true`, format-based query failures (such as providing text to a numeric field) in the query string will be ignored. This parameter can be used only when the `q` query string parameter is specified. - * **`preference` (Optional, string)**: The node or shard the operation should be performed on. It is random by default. - * **`refresh` (Optional, boolean)**: If `true`, Elasticsearch refreshes all shards involved in the delete by query after the request completes. This is different than the delete API’s `refresh` parameter, which causes just the shard that received the delete request to be refreshed. Unlike the delete API, it does not support `wait_for`. - * **`request_cache` (Optional, boolean)**: If `true`, the request cache is used for this request. Defaults to the index-level setting. - * **`requests_per_second` (Optional, float)**: The throttle for this request in sub-requests per second. - * **`routing` (Optional, string)**: A custom value used to route operations to a specific shard. - * **`q` (Optional, string)**: A query in the Lucene query string syntax. - * **`scroll` (Optional, string | -1 | 0)**: The period to retain the search context for scrolling. - * **`scroll_size` (Optional, number)**: The size of the scroll request that powers the operation. - * **`search_timeout` (Optional, string | -1 | 0)**: The explicit timeout for each search request. It defaults to no timeout. - * **`search_type` (Optional, Enum("query_then_fetch" | "dfs_query_then_fetch"))**: The type of the search operation. Available options include `query_then_fetch` and `dfs_query_then_fetch`. - * **`slices` (Optional, number | Enum("auto"))**: The number of slices this task should be divided into. - * **`sort` (Optional, string[])**: A list of `:` pairs. - * **`stats` (Optional, string[])**: The specific `tag` of the request for logging and statistical purposes. - * **`terminate_after` (Optional, number)**: The maximum number of documents to collect for each shard. If a query reaches this limit, Elasticsearch terminates the query early. Elasticsearch collects documents before sorting. Use with caution. Elasticsearch applies this parameter to each shard handling the request. When possible, let Elasticsearch perform early termination automatically. Avoid specifying this parameter for requests that target data streams with backing indices across multiple data tiers. - * **`timeout` (Optional, string | -1 | 0)**: The period each deletion request waits for active shards. - * **`version` (Optional, boolean)**: If `true`, returns the document version as part of a hit. - * **`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))**: The number of shard copies that must be active before proceeding with the operation. Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). The `timeout` value controls how long each write request waits for unavailable shards to become available. - * **`wait_for_completion` (Optional, boolean)**: If `true`, the request blocks until the operation is complete. If `false`, Elasticsearch performs some preflight checks, launches the request, and returns a task you can use to cancel or get the status of the task. Elasticsearch creates a record of this task as a document at `.tasks/task/${taskId}`. When you are done with a task, you should delete the task document so Elasticsearch can reclaim the space. 
- - - -## delete_by_query_rethrottle [_delete_by_query_rethrottle] - +### Arguments [_arguments_delete_by_query] + +#### Request (object) [_request_delete_by_query] + +- **`index` (string | string[])**: A list of data streams, indices, and aliases to search. It supports wildcards (`*`). To search all data streams or indices, omit this parameter or use `*` or `_all`. +- **`max_docs` (Optional, number)**: The maximum number of documents to delete. +- **`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })**: The documents to delete specified with Query DSL. +- **`slice` (Optional, { field, id, max })**: Slice the request manually using the provided slice ID and total number of slices. +- **`allow_no_indices` (Optional, boolean)**: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. +- **`analyzer` (Optional, string)**: Analyzer to use for the query string. This parameter can be used only when the `q` query string parameter is specified. +- **`analyze_wildcard` (Optional, boolean)**: If `true`, wildcard and prefix queries are analyzed. This parameter can be used only when the `q` query string parameter is specified. +- **`conflicts` (Optional, Enum("abort" | "proceed"))**: What to do if delete by query hits version conflicts: `abort` or `proceed`. +- **`default_operator` (Optional, Enum("and" | "or"))**: The default operator for query string query: `AND` or `OR`. This parameter can be used only when the `q` query string parameter is specified. +- **`df` (Optional, string)**: The field to use as default where no field prefix is given in the query string. This parameter can be used only when the `q` query string parameter is specified. +- **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: The type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. It supports a list of values, such as `open,hidden`. +- **`from` (Optional, number)**: Skips the specified number of documents. +- **`ignore_unavailable` (Optional, boolean)**: If `false`, the request returns an error if it targets a missing or closed index. +- **`lenient` (Optional, boolean)**: If `true`, format-based query failures (such as providing text to a numeric field) in the query string will be ignored. This parameter can be used only when the `q` query string parameter is specified. +- **`preference` (Optional, string)**: The node or shard the operation should be performed on. It is random by default. 
+- **`refresh` (Optional, boolean)**: If `true`, Elasticsearch refreshes all shards involved in the delete by query after the request completes. This is different than the delete API's `refresh` parameter, which causes just the shard that received the delete request to be refreshed. Unlike the delete API, it does not support `wait_for`. +- **`request_cache` (Optional, boolean)**: If `true`, the request cache is used for this request. Defaults to the index-level setting. +- **`requests_per_second` (Optional, float)**: The throttle for this request in sub-requests per second. +- **`routing` (Optional, string)**: A custom value used to route operations to a specific shard. +- **`q` (Optional, string)**: A query in the Lucene query string syntax. +- **`scroll` (Optional, string | -1 | 0)**: The period to retain the search context for scrolling. +- **`scroll_size` (Optional, number)**: The size of the scroll request that powers the operation. +- **`search_timeout` (Optional, string | -1 | 0)**: The explicit timeout for each search request. It defaults to no timeout. +- **`search_type` (Optional, Enum("query_then_fetch" | "dfs_query_then_fetch"))**: The type of the search operation. Available options include `query_then_fetch` and `dfs_query_then_fetch`. +- **`slices` (Optional, number | Enum("auto"))**: The number of slices this task should be divided into. +- **`sort` (Optional, string[])**: A list of `:` pairs. +- **`stats` (Optional, string[])**: The specific `tag` of the request for logging and statistical purposes. +- **`terminate_after` (Optional, number)**: The maximum number of documents to collect for each shard. If a query reaches this limit, Elasticsearch terminates the query early. Elasticsearch collects documents before sorting. Use with caution. Elasticsearch applies this parameter to each shard handling the request. When possible, let Elasticsearch perform early termination automatically. Avoid specifying this parameter for requests that target data streams with backing indices across multiple data tiers. +- **`timeout` (Optional, string | -1 | 0)**: The period each deletion request waits for active shards. +- **`version` (Optional, boolean)**: If `true`, returns the document version as part of a hit. +- **`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))**: The number of shard copies that must be active before proceeding with the operation. Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). The `timeout` value controls how long each write request waits for unavailable shards to become available. +- **`wait_for_completion` (Optional, boolean)**: If `true`, the request blocks until the operation is complete. If `false`, Elasticsearch performs some preflight checks, launches the request, and returns a task you can use to cancel or get the status of the task. Elasticsearch creates a record of this task as a document at `.tasks/task/${taskId}`. When you are done with a task, you should delete the task document so Elasticsearch can reclaim the space. + +## client.deleteByQueryRethrottle [_delete_by_query_rethrottle] Throttle a delete by query operation. -Change the number of requests per second for a particular delete by query operation. Rethrottling that speeds up the query takes effect immediately but rethrotting that slows down the query takes effect after completing the current batch to prevent scroll timeouts. +Change the number of requests per second for a particular delete by query operation. 
+Rethrottling that speeds up the query takes effect immediately but rethrottling that slows down the query takes effect after completing the current batch to prevent scroll timeouts.

-[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-delete-by-query)
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-delete-by-query-rethrottle)

```ts
client.deleteByQueryRethrottle({ task_id })
```

+### Arguments [_arguments_delete_by_query_rethrottle]
+#### Request (object) [_request_delete_by_query_rethrottle]

-### Arguments [_arguments_8]
-
-* **Request (object):**
-
-    * **`task_id` (string | number)**: The ID for the task.
-    * **`requests_per_second` (Optional, float)**: The throttle for this request in sub-requests per second. To disable throttling, set it to `-1`.
-
-
+- **`task_id` (string | number)**: The ID for the task.
+- **`requests_per_second` (Optional, float)**: The throttle for this request in sub-requests per second. To disable throttling, set it to `-1`.

-## delete_script [_delete_script]
+## client.deleteScript [_delete_script]
+Delete a script or search template.
+Deletes a stored script or search template.

-Delete a script or search template. Deletes a stored script or search template.
-
-[Endpoint documentation](docs-content://explore-analyze/scripting.md)
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-delete-script)

```ts
client.deleteScript({ id })
```

+### Arguments [_arguments_delete_script]
+#### Request (object) [_request_delete_script]

-### Arguments [_arguments_9]
-
-* **Request (object):**
-
-    * **`id` (string)**: Identifier for the stored script or search template.
-    * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.
-    * **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.
-
-
-
-## exists [_exists]
+- **`id` (string)**: The identifier for the stored script or search template.
+- **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. It can also be set to `-1` to indicate that the request should never timeout.
+- **`timeout` (Optional, string | -1 | 0)**: The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. It can also be set to `-1` to indicate that the request should never timeout.
+## client.exists [_exists]
Check a document.

-Verify that a document exists. For example, check to see if a document with the `_id` 0 exists:
+Verify that a document exists.
+For example, check to see if a document with the `_id` 0 exists:

```
HEAD my-index-000001/_doc/0
```

-If the document exists, the API returns a status code of `200 - OK`. If the document doesn’t exist, the API returns `404 - Not Found`.
+If the document exists, the API returns a status code of `200 - OK`.
+If the document doesn’t exist, the API returns `404 - Not Found`.

**Versioning support**

You can use the `version` parameter to check the document only if its current version is equal to the specified one.
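+
+For example, a minimal sketch of a version-conditioned existence check with this client (the index name, document ID, and version value are illustrative):
+
+```ts
+// Succeeds only if document 0 currently exists at the specified version
+const stillCurrent = await client.exists({
+  index: 'my-index-000001',
+  id: '0',
+  version: 1
+})
+```
+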
-Internally, Elasticsearch has marked the old document as deleted and added an entirely new document. The old version of the document doesn’t disappear immediately, although you won’t be able to access it. Elasticsearch cleans up deleted documents in the background as you continue to index more data. +Internally, Elasticsearch has marked the old document as deleted and added an entirely new document. +The old version of the document doesn't disappear immediately, although you won't be able to access it. +Elasticsearch cleans up deleted documents in the background as you continue to index more data. -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-get) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-get) ```ts client.exists({ id, index }) ``` +### Arguments [_arguments_exists] +#### Request (object) [_request_exists] -### Arguments [_arguments_10] - -* **Request (object):** - - * **`id` (string)**: A unique document identifier. - * **`index` (string)**: A list of data streams, indices, and aliases. It supports wildcards (`*`). - * **`preference` (Optional, string)**: The node or shard the operation should be performed on. By default, the operation is randomized between the shard replicas. If it is set to `_local`, the operation will prefer to be run on a local allocated shard when possible. If it is set to a custom value, the value is used to guarantee that the same shards will be used for the same custom value. This can help with "jumping values" when hitting different shards in different refresh states. A sample value can be something like the web session ID or the user name. - * **`realtime` (Optional, boolean)**: If `true`, the request is real-time as opposed to near-real-time. - * **`refresh` (Optional, boolean)**: If `true`, the request refreshes the relevant shards before retrieving the document. Setting it to `true` should be done after careful thought and verification that this does not cause a heavy load on the system (and slow down indexing). - * **`routing` (Optional, string)**: A custom value used to route operations to a specific shard. - * **`_source` (Optional, boolean | string | string[])**: Indicates whether to return the `_source` field (`true` or `false`) or lists the fields to return. - * **`_source_excludes` (Optional, string | string[])**: A list of source fields to exclude from the response. You can also use this parameter to exclude fields from the subset specified in `_source_includes` query parameter. If the `_source` parameter is `false`, this parameter is ignored. - * **`_source_includes` (Optional, string | string[])**: A list of source fields to include in the response. If this parameter is specified, only these source fields are returned. You can exclude fields from this subset using the `_source_excludes` query parameter. If the `_source` parameter is `false`, this parameter is ignored. - * **`stored_fields` (Optional, string | string[])**: A list of stored fields to return as part of a hit. If no fields are specified, no stored fields are included in the response. If this field is specified, the `_source` parameter defaults to `false`. - * **`version` (Optional, number)**: Explicit version number for concurrency control. The specified version must match the current version of the document for the request to succeed. - * **`version_type` (Optional, Enum("internal" | "external" | "external_gte" | "force"))**: The version type. 
- - - -## exists_source [_exists_source] +- **`id` (string)**: A unique document identifier. +- **`index` (string)**: A list of data streams, indices, and aliases. It supports wildcards (`*`). +- **`preference` (Optional, string)**: The node or shard the operation should be performed on. By default, the operation is randomized between the shard replicas. If it is set to `_local`, the operation will prefer to be run on a local allocated shard when possible. If it is set to a custom value, the value is used to guarantee that the same shards will be used for the same custom value. This can help with "jumping values" when hitting different shards in different refresh states. A sample value can be something like the web session ID or the user name. +- **`realtime` (Optional, boolean)**: If `true`, the request is real-time as opposed to near-real-time. +- **`refresh` (Optional, boolean)**: If `true`, the request refreshes the relevant shards before retrieving the document. Setting it to `true` should be done after careful thought and verification that this does not cause a heavy load on the system (and slow down indexing). +- **`routing` (Optional, string)**: A custom value used to route operations to a specific shard. +- **`_source` (Optional, boolean | string | string[])**: Indicates whether to return the `_source` field (`true` or `false`) or lists the fields to return. +- **`_source_excludes` (Optional, string | string[])**: A list of source fields to exclude from the response. You can also use this parameter to exclude fields from the subset specified in `_source_includes` query parameter. If the `_source` parameter is `false`, this parameter is ignored. +- **`_source_includes` (Optional, string | string[])**: A list of source fields to include in the response. If this parameter is specified, only these source fields are returned. You can exclude fields from this subset using the `_source_excludes` query parameter. If the `_source` parameter is `false`, this parameter is ignored. +- **`stored_fields` (Optional, string | string[])**: A list of stored fields to return as part of a hit. If no fields are specified, no stored fields are included in the response. If this field is specified, the `_source` parameter defaults to `false`. +- **`version` (Optional, number)**: Explicit version number for concurrency control. The specified version must match the current version of the document for the request to succeed. +- **`version_type` (Optional, Enum("internal" | "external" | "external_gte" | "force"))**: The version type. +## client.existsSource [_exists_source] Check for a document source. -Check whether a document source exists in an index. For example: +Check whether a document source exists in an index. +For example: ``` HEAD my-index-000001/_source/1 ``` -A document’s source is not available if it is disabled in the mapping. +A document's source is not available if it is disabled in the mapping. -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-get) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-get) ```ts client.existsSource({ id, index }) ``` +### Arguments [_arguments_exists_source] +#### Request (object) [_request_exists_source] -### Arguments [_arguments_11] - -* **Request (object):** - - * **`id` (string)**: A unique identifier for the document. - * **`index` (string)**: A list of data streams, indices, and aliases. It supports wildcards (`*`). 
- * **`preference` (Optional, string)**: The node or shard the operation should be performed on. By default, the operation is randomized between the shard replicas. - * **`realtime` (Optional, boolean)**: If `true`, the request is real-time as opposed to near-real-time. - * **`refresh` (Optional, boolean)**: If `true`, the request refreshes the relevant shards before retrieving the document. Setting it to `true` should be done after careful thought and verification that this does not cause a heavy load on the system (and slow down indexing). - * **`routing` (Optional, string)**: A custom value used to route operations to a specific shard. - * **`_source` (Optional, boolean | string | string[])**: Indicates whether to return the `_source` field (`true` or `false`) or lists the fields to return. - * **`_source_excludes` (Optional, string | string[])**: A list of source fields to exclude in the response. - * **`_source_includes` (Optional, string | string[])**: A list of source fields to include in the response. - * **`version` (Optional, number)**: The version number for concurrency control. It must match the current version of the document for the request to succeed. - * **`version_type` (Optional, Enum("internal" | "external" | "external_gte" | "force"))**: The version type. - - +- **`id` (string)**: A unique identifier for the document. +- **`index` (string)**: A list of data streams, indices, and aliases. It supports wildcards (`*`). +- **`preference` (Optional, string)**: The node or shard the operation should be performed on. By default, the operation is randomized between the shard replicas. +- **`realtime` (Optional, boolean)**: If `true`, the request is real-time as opposed to near-real-time. +- **`refresh` (Optional, boolean)**: If `true`, the request refreshes the relevant shards before retrieving the document. Setting it to `true` should be done after careful thought and verification that this does not cause a heavy load on the system (and slow down indexing). +- **`routing` (Optional, string)**: A custom value used to route operations to a specific shard. +- **`_source` (Optional, boolean | string | string[])**: Indicates whether to return the `_source` field (`true` or `false`) or lists the fields to return. +- **`_source_excludes` (Optional, string | string[])**: A list of source fields to exclude in the response. +- **`_source_includes` (Optional, string | string[])**: A list of source fields to include in the response. +- **`version` (Optional, number)**: The version number for concurrency control. It must match the current version of the document for the request to succeed. +- **`version_type` (Optional, Enum("internal" | "external" | "external_gte" | "force"))**: The version type. -## explain [_explain] +## client.explain [_explain] +Explain a document match result. +Get information about why a specific document matches, or doesn't match, a query. +It computes a score explanation for a query and a specific document. -Explain a document match result. Returns information about why a specific document matches, or doesn’t match, a query. - -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-explain) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-explain) ```ts client.explain({ id, index }) ``` - - -### Arguments [_arguments_12] - -* **Request (object):** - - * **`id` (string)**: Defines the document ID. - * **`index` (string)**: Index names used to limit the request. 
Only a single index name can be provided to this parameter. - * **`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })**: Defines the search definition using the Query DSL. - * **`analyzer` (Optional, string)**: Analyzer to use for the query string. This parameter can only be used when the `q` query string parameter is specified. - * **`analyze_wildcard` (Optional, boolean)**: If `true`, wildcard and prefix queries are analyzed. - * **`default_operator` (Optional, Enum("and" | "or"))**: The default operator for query string query: `AND` or `OR`. - * **`df` (Optional, string)**: Field to use as default where no field prefix is given in the query string. - * **`lenient` (Optional, boolean)**: If `true`, format-based query failures (such as providing text to a numeric field) in the query string will be ignored. - * **`preference` (Optional, string)**: Specifies the node or shard the operation should be performed on. Random by default. - * **`routing` (Optional, string)**: Custom value used to route operations to a specific shard. - * **`_source` (Optional, boolean | string | string[])**: True or false to return the `_source` field or not, or a list of fields to return. - * **`_source_excludes` (Optional, string | string[])**: A list of source fields to exclude from the response. - * **`_source_includes` (Optional, string | string[])**: A list of source fields to include in the response. - * **`stored_fields` (Optional, string | string[])**: A list of stored fields to return in the response. - * **`q` (Optional, string)**: Query in the Lucene query string syntax. - - - -## field_caps [_field_caps] - +### Arguments [_arguments_explain] + +#### Request (object) [_request_explain] + +- **`id` (string)**: The document identifier. +- **`index` (string)**: Index names that are used to limit the request. Only a single index name can be provided to this parameter. +- **`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })**: Defines the search definition using the Query DSL. +- **`analyzer` (Optional, string)**: The analyzer to use for the query string. This parameter can be used only when the `q` query string parameter is specified. 
+- **`analyze_wildcard` (Optional, boolean)**: If `true`, wildcard and prefix queries are analyzed. This parameter can be used only when the `q` query string parameter is specified. +- **`default_operator` (Optional, Enum("and" | "or"))**: The default operator for query string query: `AND` or `OR`. This parameter can be used only when the `q` query string parameter is specified. +- **`df` (Optional, string)**: The field to use as default where no field prefix is given in the query string. This parameter can be used only when the `q` query string parameter is specified. +- **`lenient` (Optional, boolean)**: If `true`, format-based query failures (such as providing text to a numeric field) in the query string will be ignored. This parameter can be used only when the `q` query string parameter is specified. +- **`preference` (Optional, string)**: The node or shard the operation should be performed on. It is random by default. +- **`routing` (Optional, string)**: A custom value used to route operations to a specific shard. +- **`_source` (Optional, boolean | string | string[])**: `True` or `false` to return the `_source` field or not or a list of fields to return. +- **`_source_excludes` (Optional, string | string[])**: A list of source fields to exclude from the response. You can also use this parameter to exclude fields from the subset specified in `_source_includes` query parameter. If the `_source` parameter is `false`, this parameter is ignored. +- **`_source_includes` (Optional, string | string[])**: A list of source fields to include in the response. If this parameter is specified, only these source fields are returned. You can exclude fields from this subset using the `_source_excludes` query parameter. If the `_source` parameter is `false`, this parameter is ignored. +- **`stored_fields` (Optional, string | string[])**: A list of stored fields to return in the response. +- **`q` (Optional, string)**: The query in the Lucene query string syntax. + +## client.fieldCaps [_field_caps] Get the field capabilities. Get information about the capabilities of fields among multiple indices. -For data streams, the API returns field capabilities among the stream’s backing indices. It returns runtime fields like any other field. For example, a runtime field with a type of keyword is returned the same as any other field that belongs to the `keyword` family. +For data streams, the API returns field capabilities among the stream’s backing indices. +It returns runtime fields like any other field. +For example, a runtime field with a type of keyword is returned the same as any other field that belongs to the `keyword` family. -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-field-caps) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-field-caps) ```ts client.fieldCaps({ ... }) ``` +### Arguments [_arguments_field_caps] +#### Request (object) [_request_field_caps] -### Arguments [_arguments_13] - -* **Request (object):** - - * **`index` (Optional, string | string[])**: List of data streams, indices, and aliases used to limit the request. Supports wildcards (*). To target all data streams and indices, omit this parameter or use * or _all. - * **`fields` (Optional, string | string[])**: List of fields to retrieve capabilities for. Wildcard (`*`) expressions are supported. 
- * **`index_filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })**: Allows to filter indices if the provided query rewrites to match_none on every shard. - * **`runtime_mappings` (Optional, Record)**: Defines ad-hoc runtime fields in the request similar to the way it is done in search requests. These fields exist only as part of the query and take precedence over fields defined with the same name in the index mappings. - * **`allow_no_indices` (Optional, boolean)**: If false, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting `foo*,bar*` returns an error if an index starts with foo but no index starts with bar. - * **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports a list of values, such as `open,hidden`. - * **`ignore_unavailable` (Optional, boolean)**: If `true`, missing or closed indices are not included in the response. - * **`include_unmapped` (Optional, boolean)**: If true, unmapped fields are included in the response. - * **`filters` (Optional, string)**: An optional set of filters: can include +metadata,-metadata,-nested,-multifield,-parent - * **`types` (Optional, string[])**: Only return results for fields that have one of the types in the list - * **`include_empty_fields` (Optional, boolean)**: If false, empty fields are not included in the response. - - - -## get [_get] +- **`index` (Optional, string | string[])**: A list of data streams, indices, and aliases used to limit the request. Supports wildcards (*). To target all data streams and indices, omit this parameter or use * or _all. +- **`fields` (Optional, string | string[])**: A list of fields to retrieve capabilities for. Wildcard (`*`) expressions are supported. 
+- **`index_filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })**: Filter indices if the provided query rewrites to `match_none` on every shard. IMPORTANT: The filtering is done on a best-effort basis, it uses index statistics and mappings to rewrite queries to `match_none` instead of fully running the request. For instance a range query over a date field can rewrite to `match_none` if all documents within a shard (including deleted documents) are outside of the provided range. However, not all queries can rewrite to `match_none` so this API may return an index even if the provided filter matches no document. +- **`runtime_mappings` (Optional, Record)**: Define ad-hoc runtime fields in the request similar to the way it is done in search requests. These fields exist only as part of the query and take precedence over fields defined with the same name in the index mappings. +- **`allow_no_indices` (Optional, boolean)**: If false, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting `foo*,bar*` returns an error if an index starts with foo but no index starts with bar. +- **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: The type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports a list of values, such as `open,hidden`. +- **`ignore_unavailable` (Optional, boolean)**: If `true`, missing or closed indices are not included in the response. +- **`include_unmapped` (Optional, boolean)**: If true, unmapped fields are included in the response. +- **`filters` (Optional, string)**: A list of filters to apply to the response. +- **`types` (Optional, string[])**: A list of field types to include. Any fields that do not match one of these types will be excluded from the results. It defaults to empty, meaning that all field types are returned. +- **`include_empty_fields` (Optional, boolean)**: If false, empty fields are not included in the response. +## client.get [_get] Get a document by its ID. Get a document and its source or stored fields from an index. -By default, this API is realtime and is not affected by the refresh rate of the index (when data will become visible for search). In the case where stored fields are requested with the `stored_fields` parameter and the document has been updated but is not yet refreshed, the API will have to parse and analyze the source to extract the stored fields. To turn off realtime behavior, set the `realtime` parameter to false. 
+By default, this API is realtime and is not affected by the refresh rate of the index (when data will become visible for search).
+In the case where stored fields are requested with the `stored_fields` parameter and the document has been updated but is not yet refreshed, the API will have to parse and analyze the source to extract the stored fields.
+To turn off realtime behavior, set the `realtime` parameter to false.

**Source filtering**

-By default, the API returns the contents of the `_source` field unless you have used the `stored_fields` parameter or the `_source` field is turned off. You can turn off `_source` retrieval by using the `_source` parameter:
+By default, the API returns the contents of the `_source` field unless you have used the `stored_fields` parameter or the `_source` field is turned off.
+You can turn off `_source` retrieval by using the `_source` parameter:

```
GET my-index-000001/_doc/0?_source=false
```

-If you only need one or two fields from the `_source`, use the `_source_includes` or `_source_excludes` parameters to include or filter out particular fields. This can be helpful with large documents where partial retrieval can save on network overhead Both parameters take a comma separated list of fields or wildcard expressions. For example:
+If you only need one or two fields from the `_source`, use the `_source_includes` or `_source_excludes` parameters to include or filter out particular fields.
+This can be helpful with large documents where partial retrieval can save on network overhead.
+Both parameters take a comma separated list of fields or wildcard expressions.
+For example:

```
GET my-index-000001/_doc/0?_source_includes=*.id&_source_excludes=entities
@@ -693,102 +708,97 @@ GET my-index-000001/_doc/0?_source=*.id

**Routing**

-If routing is used during indexing, the routing value also needs to be specified to retrieve a document. For example:
+If routing is used during indexing, the routing value also needs to be specified to retrieve a document.
+For example:

```
GET my-index-000001/_doc/2?routing=user1
```

-This request gets the document with ID 2, but it is routed based on the user. The document is not fetched if the correct routing is not specified.
+This request gets the document with ID 2, but it is routed based on the user.
+The document is not fetched if the correct routing is not specified.

**Distributed**

-The GET operation is hashed into a specific shard ID. It is then redirected to one of the replicas within that shard ID and returns the result. The replicas are the primary shard and its replicas within that shard ID group. This means that the more replicas you have, the better your GET scaling will be.
+The GET operation is hashed into a specific shard ID.
+It is then redirected to one of the replicas within that shard ID and returns the result.
+The replicas are the primary shard and its replicas within that shard ID group.
+This means that the more replicas you have, the better your GET scaling will be.

**Versioning support**

You can use the `version` parameter to retrieve the document only if its current version is equal to the specified one.

-Internally, Elasticsearch has marked the old document as deleted and added an entirely new document. The old version of the document doesn’t disappear immediately, although you won’t be able to access it. Elasticsearch cleans up deleted documents in the background as you continue to index more data.
+Internally, Elasticsearch has marked the old document as deleted and added an entirely new document.
+The old version of the document doesn't disappear immediately, although you won't be able to access it.
+Elasticsearch cleans up deleted documents in the background as you continue to index more data.

-[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-get)
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-get)

```ts
client.get({ id, index })
```

+### Arguments [_arguments_get]
+#### Request (object) [_request_get]

-### Arguments [_arguments_14]
+- **`id` (string)**: A unique document identifier.
+- **`index` (string)**: The name of the index that contains the document.
+- **`force_synthetic_source` (Optional, boolean)**: Indicates whether the request forces synthetic `_source`. Use this parameter to test if the mapping supports synthetic `_source` and to get a sense of the worst case performance. Fetches with this parameter enabled will be slower than enabling synthetic source natively in the index.
+- **`preference` (Optional, string)**: The node or shard the operation should be performed on. By default, the operation is randomized between the shard replicas. If it is set to `_local`, the operation will prefer to be run on a local allocated shard when possible. If it is set to a custom value, the value is used to guarantee that the same shards will be used for the same custom value. This can help with "jumping values" when hitting different shards in different refresh states. A sample value can be something like the web session ID or the user name.
+- **`realtime` (Optional, boolean)**: If `true`, the request is real-time as opposed to near-real-time.
+- **`refresh` (Optional, boolean)**: If `true`, the request refreshes the relevant shards before retrieving the document. Setting it to `true` should be done after careful thought and verification that this does not cause a heavy load on the system (and slow down indexing).
+- **`routing` (Optional, string)**: A custom value used to route operations to a specific shard.
+- **`_source` (Optional, boolean | string | string[])**: Indicates whether to return the `_source` field (`true` or `false`) or lists the fields to return.
+- **`_source_excludes` (Optional, string | string[])**: A list of source fields to exclude from the response. You can also use this parameter to exclude fields from the subset specified in `_source_includes` query parameter. If the `_source` parameter is `false`, this parameter is ignored.
+- **`_source_includes` (Optional, string | string[])**: A list of source fields to include in the response. If this parameter is specified, only these source fields are returned. You can exclude fields from this subset using the `_source_excludes` query parameter. If the `_source` parameter is `false`, this parameter is ignored.
+- **`stored_fields` (Optional, string | string[])**: A list of stored fields to return as part of a hit. If no fields are specified, no stored fields are included in the response. If this field is specified, the `_source` parameter defaults to `false`. Only leaf fields can be retrieved with the `stored_field` option. Object fields can't be returned; if specified, the request fails.
+- **`version` (Optional, number)**: The version number for concurrency control. It must match the current version of the document for the request to succeed.
+- **`version_type` (Optional, Enum("internal" | "external" | "external_gte" | "force"))**: The version type. -* **Request (object):** +## client.getScript [_get_script] +Get a script or search template. +Retrieves a stored script or search template. - * **`id` (string)**: A unique document identifier. - * **`index` (string)**: The name of the index that contains the document. - * **`force_synthetic_source` (Optional, boolean)**: Indicates whether the request forces synthetic `_source`. Use this paramater to test if the mapping supports synthetic `_source` and to get a sense of the worst case performance. Fetches with this parameter enabled will be slower than enabling synthetic source natively in the index. - * **`preference` (Optional, string)**: The node or shard the operation should be performed on. By default, the operation is randomized between the shard replicas. If it is set to `_local`, the operation will prefer to be run on a local allocated shard when possible. If it is set to a custom value, the value is used to guarantee that the same shards will be used for the same custom value. This can help with "jumping values" when hitting different shards in different refresh states. A sample value can be something like the web session ID or the user name. - * **`realtime` (Optional, boolean)**: If `true`, the request is real-time as opposed to near-real-time. - * **`refresh` (Optional, boolean)**: If `true`, the request refreshes the relevant shards before retrieving the document. Setting it to `true` should be done after careful thought and verification that this does not cause a heavy load on the system (and slow down indexing). - * **`routing` (Optional, string)**: A custom value used to route operations to a specific shard. - * **`_source` (Optional, boolean | string | string[])**: Indicates whether to return the `_source` field (`true` or `false`) or lists the fields to return. - * **`_source_excludes` (Optional, string | string[])**: A list of source fields to exclude from the response. You can also use this parameter to exclude fields from the subset specified in `_source_includes` query parameter. If the `_source` parameter is `false`, this parameter is ignored. - * **`_source_includes` (Optional, string | string[])**: A list of source fields to include in the response. If this parameter is specified, only these source fields are returned. You can exclude fields from this subset using the `_source_excludes` query parameter. If the `_source` parameter is `false`, this parameter is ignored. - * **`stored_fields` (Optional, string | string[])**: A list of stored fields to return as part of a hit. If no fields are specified, no stored fields are included in the response. If this field is specified, the `_source` parameter defaults to `false`. Only leaf fields can be retrieved with the `stored_field` option. Object fields can’t be returned;if specified, the request fails. - * **`version` (Optional, number)**: The version number for concurrency control. It must match the current version of the document for the request to succeed. - * **`version_type` (Optional, Enum("internal" | "external" | "external_gte" | "force"))**: The version type. - - - -## get_script [_get_script] - -Get a script or search template. Retrieves a stored script or search template. 
- -[Endpoint documentation](docs-content://explore-analyze/scripting.md) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-get-script) ```ts client.getScript({ id }) ``` +### Arguments [_arguments_get_script] +#### Request (object) [_request_get_script] -### Arguments [_arguments_15] - -* **Request (object):** - - * **`id` (string)**: Identifier for the stored script or search template. - * **`master_timeout` (Optional, string | -1 | 0)**: Specify timeout for connection to master - - - -## get_script_context [_get_script_context] +- **`id` (string)**: The identifier for the stored script or search template. +- **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for the master node. If the master node is not available before the timeout expires, the request fails and returns an error. It can also be set to `-1` to indicate that the request should never timeout. +## client.getScriptContext [_get_script_context] Get script contexts. Get a list of supported script contexts and their methods. -[Endpoint documentation](elasticsearch://reference/scripting-languages/painless/painless-contexts.md) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-get-script-context) ```ts client.getScriptContext() ``` - -## get_script_languages [_get_script_languages] - +## client.getScriptLanguages [_get_script_languages] Get script languages. Get a list of available script types, languages, and contexts. -[Endpoint documentation](docs-content://explore-analyze/scripting.md) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-get-script-languages) ```ts client.getScriptLanguages() ``` +## client.getSource [_get_source] +Get a document's source. -## get_source [_get_source] - -Get a document’s source. - -Get the source of a document. For example: +Get the source of a document. +For example: ``` GET my-index-000001/_source/1 @@ -800,77 +810,69 @@ You can use the source filtering parameters to control which parts of the `_sour GET my-index-000001/_source/1/?_source_includes=*.id&_source_excludes=entities ``` -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-get) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-get) ```ts client.getSource({ id, index }) ``` +### Arguments [_arguments_get_source] +#### Request (object) [_request_get_source] -### Arguments [_arguments_16] - -* **Request (object):** +- **`id` (string)**: A unique document identifier. +- **`index` (string)**: The name of the index that contains the document. +- **`preference` (Optional, string)**: The node or shard the operation should be performed on. By default, the operation is randomized between the shard replicas. +- **`realtime` (Optional, boolean)**: If `true`, the request is real-time as opposed to near-real-time. +- **`refresh` (Optional, boolean)**: If `true`, the request refreshes the relevant shards before retrieving the document. Setting it to `true` should be done after careful thought and verification that this does not cause a heavy load on the system (and slow down indexing). +- **`routing` (Optional, string)**: A custom value used to route operations to a specific shard. +- **`_source` (Optional, boolean | string | string[])**: Indicates whether to return the `_source` field (`true` or `false`) or lists the fields to return. 
+- **`_source_excludes` (Optional, string | string[])**: A list of source fields to exclude in the response. +- **`_source_includes` (Optional, string | string[])**: A list of source fields to include in the response. +- **`stored_fields` (Optional, string | string[])**: A list of stored fields to return as part of a hit. +- **`version` (Optional, number)**: The version number for concurrency control. It must match the current version of the document for the request to succeed. +- **`version_type` (Optional, Enum("internal" | "external" | "external_gte" | "force"))**: The version type. - * **`id` (string)**: A unique document identifier. - * **`index` (string)**: The name of the index that contains the document. - * **`preference` (Optional, string)**: The node or shard the operation should be performed on. By default, the operation is randomized between the shard replicas. - * **`realtime` (Optional, boolean)**: If `true`, the request is real-time as opposed to near-real-time. - * **`refresh` (Optional, boolean)**: If `true`, the request refreshes the relevant shards before retrieving the document. Setting it to `true` should be done after careful thought and verification that this does not cause a heavy load on the system (and slow down indexing). - * **`routing` (Optional, string)**: A custom value used to route operations to a specific shard. - * **`_source` (Optional, boolean | string | string[])**: Indicates whether to return the `_source` field (`true` or `false`) or lists the fields to return. - * **`_source_excludes` (Optional, string | string[])**: A list of source fields to exclude in the response. - * **`_source_includes` (Optional, string | string[])**: A list of source fields to include in the response. - * **`stored_fields` (Optional, string | string[])**: A list of stored fields to return as part of a hit. - * **`version` (Optional, number)**: The version number for concurrency control. It must match the current version of the document for the request to succeed. - * **`version_type` (Optional, Enum("internal" | "external" | "external_gte" | "force"))**: The version type. +## client.healthReport [_health_report] +Get the cluster health. +Get a report with the health status of an Elasticsearch cluster. +The report contains a list of indicators that compose Elasticsearch functionality. - - -## health_report [_health_report] - -Get the cluster health. Get a report with the health status of an Elasticsearch cluster. The report contains a list of indicators that compose Elasticsearch functionality. - -Each indicator has a health status of: green, unknown, yellow or red. The indicator will provide an explanation and metadata describing the reason for its current health status. +Each indicator has a health status of: green, unknown, yellow or red. +The indicator will provide an explanation and metadata describing the reason for its current health status. The cluster’s status is controlled by the worst indicator status. -In the event that an indicator’s status is non-green, a list of impacts may be present in the indicator result which detail the functionalities that are negatively affected by the health issue. Each impact carries with it a severity level, an area of the system that is affected, and a simple description of the impact on the system. +In the event that an indicator’s status is non-green, a list of impacts may be present in the indicator result which detail the functionalities that are negatively affected by the health issue. 
+Each impact carries with it a severity level, an area of the system that is affected, and a simple description of the impact on the system. -Some health indicators can determine the root cause of a health problem and prescribe a set of steps that can be performed in order to improve the health of the system. The root cause and remediation steps are encapsulated in a diagnosis. A diagnosis contains a cause detailing a root cause analysis, an action containing a brief description of the steps to take to fix the problem, the list of affected resources (if applicable), and a detailed step-by-step troubleshooting guide to fix the diagnosed problem. +Some health indicators can determine the root cause of a health problem and prescribe a set of steps that can be performed in order to improve the health of the system. +The root cause and remediation steps are encapsulated in a diagnosis. +A diagnosis contains a cause detailing a root cause analysis, an action containing a brief description of the steps to take to fix the problem, the list of affected resources (if applicable), and a detailed step-by-step troubleshooting guide to fix the diagnosed problem. -::::{note} -The health indicators perform root cause analysis of non-green health statuses. This can be computationally expensive when called frequently. When setting up automated polling of the API for health status, set verbose to false to disable the more expensive analysis logic. -:::: +NOTE: The health indicators perform root cause analysis of non-green health statuses. This can be computationally expensive when called frequently. +When setting up automated polling of the API for health status, set verbose to false to disable the more expensive analysis logic. - -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-health-report) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-health-report) ```ts client.healthReport({ ... }) ``` +### Arguments [_arguments_health_report] +#### Request (object) [_request_health_report] -### Arguments [_arguments_17] - -* **Request (object):** - - * **`feature` (Optional, string | string[])**: A feature of the cluster, as returned by the top-level health report API. - * **`timeout` (Optional, string | -1 | 0)**: Explicit operation timeout. - * **`verbose` (Optional, boolean)**: Opt-in for more information about the health of the system. - * **`size` (Optional, number)**: Limit the number of affected resources the health report API returns. - - - -## index [_index] +- **`feature` (Optional, string | string[])**: A feature of the cluster, as returned by the top-level health report API. +- **`timeout` (Optional, string | -1 | 0)**: Explicit operation timeout. +- **`verbose` (Optional, boolean)**: Opt-in for more information about the health of the system. +- **`size` (Optional, number)**: Limit the number of affected resources the health report API returns. +## client.index [_index] Create or update a document in an index. -Add a JSON document to the specified data stream or index and make it searchable. If the target is an index and the document already exists, the request updates the document and increments its version. - -::::{note} -You cannot use this API to send update requests for existing documents in a data stream. -:::: +Add a JSON document to the specified data stream or index and make it searchable. 
+If the target is an index and the document already exists, the request updates the document and increments its version. +NOTE: You cannot use this API to send update requests for existing documents in a data stream. If the Elasticsearch security features are enabled, you must have the following index privileges for the target data stream, index, or index alias: @@ -880,76 +882,96 @@ If the Elasticsearch security features are enabled, you must have the following Automatic data stream creation requires a matching index template with data stream enabled. -::::{note} -Replica shards might not all be started when an indexing operation returns successfully. By default, only the primary is required. Set `wait_for_active_shards` to change this default behavior. -:::: - +NOTE: Replica shards might not all be started when an indexing operation returns successfully. +By default, only the primary is required. Set `wait_for_active_shards` to change this default behavior. **Automatically create data streams and indices** -If the request’s target doesn’t exist and matches an index template with a `data_stream` definition, the index operation automatically creates the data stream. +If the request's target doesn't exist and matches an index template with a `data_stream` definition, the index operation automatically creates the data stream. -If the target doesn’t exist and doesn’t match a data stream template, the operation automatically creates the index and applies any matching index templates. +If the target doesn't exist and doesn't match a data stream template, the operation automatically creates the index and applies any matching index templates. -::::{note} -Elasticsearch includes several built-in index templates. To avoid naming collisions with these templates, refer to index pattern documentation. -:::: +NOTE: Elasticsearch includes several built-in index templates. To avoid naming collisions with these templates, refer to index pattern documentation. +If no mapping exists, the index operation creates a dynamic mapping. +By default, new fields and objects are automatically added to the mapping if needed. -If no mapping exists, the index operation creates a dynamic mapping. By default, new fields and objects are automatically added to the mapping if needed. - -Automatic index creation is controlled by the `action.auto_create_index` setting. If it is `true`, any index can be created automatically. You can modify this setting to explicitly allow or block automatic creation of indices that match specified patterns or set it to `false` to turn off automatic index creation entirely. Specify a list of patterns you want to allow or prefix each pattern with `+` or `-` to indicate whether it should be allowed or blocked. When a list is specified, the default behaviour is to disallow. - -::::{note} -The `action.auto_create_index` setting affects the automatic creation of indices only. It does not affect the creation of data streams. -:::: +Automatic index creation is controlled by the `action.auto_create_index` setting. +If it is `true`, any index can be created automatically. +You can modify this setting to explicitly allow or block automatic creation of indices that match specified patterns or set it to `false` to turn off automatic index creation entirely. +Specify a list of patterns you want to allow or prefix each pattern with `+` or `-` to indicate whether it should be allowed or blocked. +When a list is specified, the default behaviour is to disallow. 
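+As a minimal, illustrative sketch (the index patterns below are examples, not defaults), this setting can be adjusted from the client through the cluster settings API:
+
+```ts
+// Allow auto-creation for indices matching my-index-*, block it for test-*
+const resp = await client.cluster.putSettings({
+  persistent: {
+    'action.auto_create_index': 'my-index-*,-test-*'
+  }
+})
+console.log(resp.acknowledged)
+```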
+NOTE: The `action.auto_create_index` setting affects the automatic creation of indices only. +It does not affect the creation of data streams. **Optimistic concurrency control** -Index operations can be made conditional and only be performed if the last modification to the document was assigned the sequence number and primary term specified by the `if_seq_no` and `if_primary_term` parameters. If a mismatch is detected, the operation will result in a `VersionConflictException` and a status code of `409`. +Index operations can be made conditional and only be performed if the last modification to the document was assigned the sequence number and primary term specified by the `if_seq_no` and `if_primary_term` parameters. +If a mismatch is detected, the operation will result in a `VersionConflictException` and a status code of `409`. **Routing** -By default, shard placement — or routing — is controlled by using a hash of the document’s ID value. For more explicit control, the value fed into the hash function used by the router can be directly specified on a per-operation basis using the `routing` parameter. +By default, shard placement — or routing — is controlled by using a hash of the document's ID value. +For more explicit control, the value fed into the hash function used by the router can be directly specified on a per-operation basis using the `routing` parameter. -When setting up explicit mapping, you can also use the `_routing` field to direct the index operation to extract the routing value from the document itself. This does come at the (very minimal) cost of an additional document parsing pass. If the `_routing` mapping is defined and set to be required, the index operation will fail if no routing value is provided or extracted. - -::::{note} -Data streams do not support custom routing unless they were created with the `allow_custom_routing` setting enabled in the template. -:::: +When setting up explicit mapping, you can also use the `_routing` field to direct the index operation to extract the routing value from the document itself. +This does come at the (very minimal) cost of an additional document parsing pass. +If the `_routing` mapping is defined and set to be required, the index operation will fail if no routing value is provided or extracted. +NOTE: Data streams do not support custom routing unless they were created with the `allow_custom_routing` setting enabled in the template. **Distributed** -The index operation is directed to the primary shard based on its route and performed on the actual node containing this shard. After the primary shard completes the operation, if needed, the update is distributed to applicable replicas. +The index operation is directed to the primary shard based on its route and performed on the actual node containing this shard. +After the primary shard completes the operation, if needed, the update is distributed to applicable replicas. **Active shards** -To improve the resiliency of writes to the system, indexing operations can be configured to wait for a certain number of active shard copies before proceeding with the operation. If the requisite number of active shard copies are not available, then the write operation must wait and retry, until either the requisite shard copies have started or a timeout occurs. By default, write operations only wait for the primary shards to be active before proceeding (that is to say `wait_for_active_shards` is `1`). 
This default can be overridden in the index settings dynamically by setting `index.write.wait_for_active_shards`. To alter this behavior per operation, use the `wait_for_active_shards request` parameter. +To improve the resiliency of writes to the system, indexing operations can be configured to wait for a certain number of active shard copies before proceeding with the operation. +If the requisite number of active shard copies are not available, then the write operation must wait and retry, until either the requisite shard copies have started or a timeout occurs. +By default, write operations only wait for the primary shards to be active before proceeding (that is to say `wait_for_active_shards` is `1`). +This default can be overridden in the index settings dynamically by setting `index.write.wait_for_active_shards`. +To alter this behavior per operation, use the `wait_for_active_shards request` parameter. -Valid values are all or any positive integer up to the total number of configured copies per shard in the index (which is `number_of_replicas`+1). Specifying a negative value or a number greater than the number of shard copies will throw an error. +Valid values are all or any positive integer up to the total number of configured copies per shard in the index (which is `number_of_replicas`+1). +Specifying a negative value or a number greater than the number of shard copies will throw an error. -For example, suppose you have a cluster of three nodes, A, B, and C and you create an index index with the number of replicas set to 3 (resulting in 4 shard copies, one more copy than there are nodes). If you attempt an indexing operation, by default the operation will only ensure the primary copy of each shard is available before proceeding. This means that even if B and C went down and A hosted the primary shard copies, the indexing operation would still proceed with only one copy of the data. If `wait_for_active_shards` is set on the request to `3` (and all three nodes are up), the indexing operation will require 3 active shard copies before proceeding. This requirement should be met because there are 3 active nodes in the cluster, each one holding a copy of the shard. However, if you set `wait_for_active_shards` to `all` (or to `4`, which is the same in this situation), the indexing operation will not proceed as you do not have all 4 copies of each shard active in the index. The operation will timeout unless a new node is brought up in the cluster to host the fourth copy of the shard. +For example, suppose you have a cluster of three nodes, A, B, and C and you create an index index with the number of replicas set to 3 (resulting in 4 shard copies, one more copy than there are nodes). +If you attempt an indexing operation, by default the operation will only ensure the primary copy of each shard is available before proceeding. +This means that even if B and C went down and A hosted the primary shard copies, the indexing operation would still proceed with only one copy of the data. +If `wait_for_active_shards` is set on the request to `3` (and all three nodes are up), the indexing operation will require 3 active shard copies before proceeding. +This requirement should be met because there are 3 active nodes in the cluster, each one holding a copy of the shard. +However, if you set `wait_for_active_shards` to `all` (or to `4`, which is the same in this situation), the indexing operation will not proceed as you do not have all 4 copies of each shard active in the index. 
+The operation will timeout unless a new node is brought up in the cluster to host the fourth copy of the shard. -It is important to note that this setting greatly reduces the chances of the write operation not writing to the requisite number of shard copies, but it does not completely eliminate the possibility, because this check occurs before the write operation starts. After the write operation is underway, it is still possible for replication to fail on any number of shard copies but still succeed on the primary. The `_shards` section of the API response reveals the number of shard copies on which replication succeeded and failed. +It is important to note that this setting greatly reduces the chances of the write operation not writing to the requisite number of shard copies, but it does not completely eliminate the possibility, because this check occurs before the write operation starts. +After the write operation is underway, it is still possible for replication to fail on any number of shard copies but still succeed on the primary. +The `_shards` section of the API response reveals the number of shard copies on which replication succeeded and failed. **No operation (noop) updates** -When updating a document by using this API, a new version of the document is always created even if the document hasn’t changed. If this isn’t acceptable use the `_update` API with `detect_noop` set to `true`. The `detect_noop` option isn’t available on this API because it doesn’t fetch the old source and isn’t able to compare it against the new source. +When updating a document by using this API, a new version of the document is always created even if the document hasn't changed. +If this isn't acceptable use the `_update` API with `detect_noop` set to `true`. +The `detect_noop` option isn't available on this API because it doesn’t fetch the old source and isn't able to compare it against the new source. -There isn’t a definitive rule for when noop updates aren’t acceptable. It’s a combination of lots of factors like how frequently your data source sends updates that are actually noops and how many queries per second Elasticsearch runs on the shard receiving the updates. +There isn't a definitive rule for when noop updates aren't acceptable. +It's a combination of lots of factors like how frequently your data source sends updates that are actually noops and how many queries per second Elasticsearch runs on the shard receiving the updates. **Versioning** -Each indexed document is given a version number. By default, internal versioning is used that starts at 1 and increments with each update, deletes included. Optionally, the version number can be set to an external value (for example, if maintained in a database). To enable this functionality, `version_type` should be set to `external`. The value provided must be a numeric, long value greater than or equal to 0, and less than around `9.2e+18`. - -::::{note} -Versioning is completely real time, and is not affected by the near real time aspects of search operations. If no version is provided, the operation runs without any version checks. -:::: +Each indexed document is given a version number. +By default, internal versioning is used that starts at 1 and increments with each update, deletes included. +Optionally, the version number can be set to an external value (for example, if maintained in a database). +To enable this functionality, `version_type` should be set to `external`. 
+The value provided must be a numeric, long value greater than or equal to 0, and less than around `9.2e+18`. +NOTE: Versioning is completely real time, and is not affected by the near real time aspects of search operations. +If no version is provided, the operation runs without any version checks. -When using the external version type, the system checks to see if the version number passed to the index request is greater than the version of the currently stored document. If true, the document will be indexed and the new version number used. If the value provided is less than or equal to the stored document’s version number, a version conflict will occur and the index operation will fail. For example: +When using the external version type, the system checks to see if the version number passed to the index request is greater than the version of the currently stored document. +If true, the document will be indexed and the new version number used. +If the value provided is less than or equal to the stored document's version number, a version conflict will occur and the index operation will fail. For example: ``` PUT my-index-000001/_doc/1?version=2&version_type=external @@ -958,125 +980,103 @@ PUT my-index-000001/_doc/1?version=2&version_type=external "id": "elkbee" } } -``` -In this example, the operation will succeed since the supplied version of 2 is higher than the current document version of 1. If the document was already updated and its version was set to 2 or higher, the indexing command will fail and result in a conflict (409 HTTP status code). +In this example, the operation will succeed since the supplied version of 2 is higher than the current document version of 1. +If the document was already updated and its version was set to 2 or higher, the indexing command will fail and result in a conflict (409 HTTP status code). -A nice side effect is that there is no need to maintain strict ordering of async indexing operations run as a result of changes to a source database, as long as version numbers from the source database are used. Even the simple case of updating the Elasticsearch index using data from a database is simplified if external versioning is used, as only the latest version will be used if the index operations arrive out of order. +A nice side effect is that there is no need to maintain strict ordering of async indexing operations run as a result of changes to a source database, as long as version numbers from the source database are used. +Even the simple case of updating the Elasticsearch index using data from a database is simplified if external versioning is used, as only the latest version will be used if the index operations arrive out of order. -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-create) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-create) ```ts client.index({ index }) ``` +### Arguments [_arguments_index] +#### Request (object) [_request_index] -### Arguments [_arguments_18] - -* **Request (object):** - - * **`index` (string)**: The name of the data stream or index to target. If the target doesn’t exist and matches the name or wildcard (`*`) pattern of an index template with a `data_stream` definition, this request creates the data stream. If the target doesn’t exist and doesn’t match a data stream template, this request creates the index. You can check for existing targets with the resolve index API. 
- * **`id` (Optional, string)**: A unique identifier for the document. To automatically generate a document ID, use the `POST //_doc/` request format and omit this parameter. - * **`document` (Optional, object)**: A document. - * **`if_primary_term` (Optional, number)**: Only perform the operation if the document has this primary term. - * **`if_seq_no` (Optional, number)**: Only perform the operation if the document has this sequence number. - * **`op_type` (Optional, Enum("index" | "create"))**: Set to `create` to only index the document if it does not already exist (put if absent). If a document with the specified `_id` already exists, the indexing operation will fail. The behavior is the same as using the `/_create` endpoint. If a document ID is specified, this paramater defaults to `index`. Otherwise, it defaults to `create`. If the request targets a data stream, an `op_type` of `create` is required. - * **`pipeline` (Optional, string)**: The ID of the pipeline to use to preprocess incoming documents. If the index has a default ingest pipeline specified, then setting the value to `_none` disables the default ingest pipeline for this request. If a final pipeline is configured it will always run, regardless of the value of this parameter. - * **`refresh` (Optional, Enum(true | false | "wait_for"))**: If `true`, Elasticsearch refreshes the affected shards to make this operation visible to search. If `wait_for`, it waits for a refresh to make this operation visible to search. If `false`, it does nothing with refreshes. - * **`routing` (Optional, string)**: A custom value that is used to route operations to a specific shard. - * **`timeout` (Optional, string | -1 | 0)**: The period the request waits for the following operations: automatic index creation, dynamic mapping updates, waiting for active shards. This parameter is useful for situations where the primary shard assigned to perform the operation might not be available when the operation runs. Some reasons for this might be that the primary shard is currently recovering from a gateway or undergoing relocation. By default, the operation will wait on the primary shard to become available for at least 1 minute before failing and responding with an error. The actual wait time could be longer, particularly when multiple waits occur. - * **`version` (Optional, number)**: An explicit version number for concurrency control. It must be a non-negative long number. - * **`version_type` (Optional, Enum("internal" | "external" | "external_gte" | "force"))**: The version type. - * **`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))**: The number of shard copies that must be active before proceeding with the operation. You can set it to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). The default value of `1` means it waits for each primary shard to be active. - * **`require_alias` (Optional, boolean)**: If `true`, the destination must be an index alias. - +- **`index` (string)**: The name of the data stream or index to target. If the target doesn't exist and matches the name or wildcard (`*`) pattern of an index template with a `data_stream` definition, this request creates the data stream. If the target doesn't exist and doesn't match a data stream template, this request creates the index. You can check for existing targets with the resolve index API. +- **`id` (Optional, string)**: A unique identifier for the document. 
To automatically generate a document ID, use the `POST //_doc/` request format and omit this parameter. +- **`document` (Optional, object)**: A document. +- **`if_primary_term` (Optional, number)**: Only perform the operation if the document has this primary term. +- **`if_seq_no` (Optional, number)**: Only perform the operation if the document has this sequence number. +- **`include_source_on_error` (Optional, boolean)**: Whether to include the document source in the error message in case of parsing errors. +- **`op_type` (Optional, Enum("index" | "create"))**: Set to `create` to only index the document if it does not already exist (put if absent). If a document with the specified `_id` already exists, the indexing operation will fail. The behavior is the same as using the `/_create` endpoint. If a document ID is specified, this parameter defaults to `index`. Otherwise, it defaults to `create`. If the request targets a data stream, an `op_type` of `create` is required. +- **`pipeline` (Optional, string)**: The ID of the pipeline to use to preprocess incoming documents. If the index has a default ingest pipeline specified, then setting the value to `_none` disables the default ingest pipeline for this request. If a final pipeline is configured it will always run, regardless of the value of this parameter. +- **`refresh` (Optional, Enum(true | false | "wait_for"))**: If `true`, Elasticsearch refreshes the affected shards to make this operation visible to search. If `wait_for`, it waits for a refresh to make this operation visible to search. If `false`, it does nothing with refreshes. +- **`routing` (Optional, string)**: A custom value that is used to route operations to a specific shard. +- **`timeout` (Optional, string | -1 | 0)**: The period the request waits for the following operations: automatic index creation, dynamic mapping updates, waiting for active shards. This parameter is useful for situations where the primary shard assigned to perform the operation might not be available when the operation runs. Some reasons for this might be that the primary shard is currently recovering from a gateway or undergoing relocation. By default, the operation will wait on the primary shard to become available for at least 1 minute before failing and responding with an error. The actual wait time could be longer, particularly when multiple waits occur. +- **`version` (Optional, number)**: An explicit version number for concurrency control. It must be a non-negative long number. +- **`version_type` (Optional, Enum("internal" | "external" | "external_gte" | "force"))**: The version type. +- **`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))**: The number of shard copies that must be active before proceeding with the operation. You can set it to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). The default value of `1` means it waits for each primary shard to be active. +- **`require_alias` (Optional, boolean)**: If `true`, the destination must be an index alias. +## client.info [_info] +Get cluster info. +Get basic build, version, and cluster information. -## info [_info] - -Get cluster info. Get basic build, version, and cluster information.
- -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-info) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/group/endpoint-info) ```ts client.info() ``` +## client.knnSearch [_knn_search] +Performs a kNN search. -## knn_search [_knn_search] - -Run a knn search. - -::::{note} -The kNN search API has been replaced by the `knn` option in the search API. -:::: - - -Perform a k-nearest neighbor (kNN) search on a dense_vector field and return the matching documents. Given a query vector, the API finds the k closest vectors and returns those documents as search hits. - -Elasticsearch uses the HNSW algorithm to support efficient kNN search. Like most kNN algorithms, HNSW is an approximate method that sacrifices result accuracy for improved search speed. This means the results returned are not always the true k closest neighbors. - -The kNN search API supports restricting the search using a filter. The search will return the top k documents that also match the filter query. - -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search) +[Endpoint documentation](https://www.elastic.co/guide/en/elasticsearch/reference/master/search-search.html) ```ts -client.knnSearch({ index, knn }) +client.knnSearch() ``` +## client.mget [_mget] +Get multiple documents. -### Arguments [_arguments_19] - -* **Request (object):** - - * **`index` (string | string[])**: A list of index names to search; use `_all` or to perform the operation on all indices - * **`knn` ({ field, query_vector, k, num_candidates })**: kNN query to execute - * **`_source` (Optional, boolean | { excludes, includes })**: Indicates which source fields are returned for matching documents. These fields are returned in the hits._source property of the search response. - * **`docvalue_fields` (Optional, { field, format, include_unmapped }[])**: The request returns doc values for field names matching these patterns in the hits.fields property of the response. Accepts wildcard (*) patterns. - * **`stored_fields` (Optional, string | string[])**: List of stored fields to return as part of a hit. If no fields are specified, no stored fields are included in the response. If this field is specified, the _source parameter defaults to false. You can pass _source: true to return both source fields and stored fields in the search response. - * **`fields` (Optional, string | string[])**: The request returns values for field names matching these patterns in the hits.fields property of the response. Accepts wildcard (*) patterns. 
- * **`filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type } | { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type }[])**: Query to filter the documents that can match. The kNN search will return the top `k` documents that also match this filter. The value can be a single query or a list of queries. If `filter` isn’t provided, all documents are allowed to match. - * **`routing` (Optional, string)**: A list of specific routing values - +Get multiple JSON documents by ID from one or more indices. +If you specify an index in the request URI, you only need to specify the document IDs in the request body. +To ensure fast responses, this multi get (mget) API responds with partial results if one or more shards fail. +**Filter source fields** -## mget [_mget] +By default, the `_source` field is returned for every document (if stored). +Use the `_source` and `_source_include` or `source_exclude` attributes to filter what fields are returned for a particular document. +You can include the `_source`, `_source_includes`, and `_source_excludes` query parameters in the request URI to specify the defaults to use when there are no per-document instructions. -Get multiple documents. +**Get stored fields** -Get multiple JSON documents by ID from one or more indices. If you specify an index in the request URI, you only need to specify the document IDs in the request body. To ensure fast responses, this multi get (mget) API responds with partial results if one or more shards fail. +Use the `stored_fields` attribute to specify the set of stored fields you want to retrieve. +Any requested fields that are not stored are ignored. +You can include the `stored_fields` query parameter in the request URI to specify the defaults to use when there are no per-document instructions. -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-mget) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-mget) ```ts client.mget({ ... 
}) ``` +### Arguments [_arguments_mget] +#### Request (object) [_request_mget] -### Arguments [_arguments_20] - -* **Request (object):** - - * **`index` (Optional, string)**: Name of the index to retrieve documents from when `ids` are specified, or when a document in the `docs` array does not specify an index. - * **`docs` (Optional, { _id, _index, routing, _source, stored_fields, version, version_type }[])**: The documents you want to retrieve. Required if no index is specified in the request URI. - * **`ids` (Optional, string | string[])**: The IDs of the documents you want to retrieve. Allowed when the index is specified in the request URI. - * **`force_synthetic_source` (Optional, boolean)**: Should this request force synthetic _source? Use this to test if the mapping supports synthetic _source and to get a sense of the worst case performance. Fetches with this enabled will be slower the enabling synthetic source natively in the index. - * **`preference` (Optional, string)**: Specifies the node or shard the operation should be performed on. Random by default. - * **`realtime` (Optional, boolean)**: If `true`, the request is real-time as opposed to near-real-time. - * **`refresh` (Optional, boolean)**: If `true`, the request refreshes relevant shards before retrieving documents. - * **`routing` (Optional, string)**: Custom value used to route operations to a specific shard. - * **`_source` (Optional, boolean | string | string[])**: True or false to return the `_source` field or not, or a list of fields to return. - * **`_source_excludes` (Optional, string | string[])**: A list of source fields to exclude from the response. You can also use this parameter to exclude fields from the subset specified in `_source_includes` query parameter. - * **`_source_includes` (Optional, string | string[])**: A list of source fields to include in the response. If this parameter is specified, only these source fields are returned. You can exclude fields from this subset using the `_source_excludes` query parameter. If the `_source` parameter is `false`, this parameter is ignored. - * **`stored_fields` (Optional, string | string[])**: If `true`, retrieves the document fields stored in the index rather than the document `_source`. - - - -## msearch [_msearch] +- **`index` (Optional, string)**: Name of the index to retrieve documents from when `ids` are specified, or when a document in the `docs` array does not specify an index. +- **`docs` (Optional, { _id, _index, routing, _source, stored_fields, version, version_type }[])**: The documents you want to retrieve. Required if no index is specified in the request URI. +- **`ids` (Optional, string | string[])**: The IDs of the documents you want to retrieve. Allowed when the index is specified in the request URI. +- **`force_synthetic_source` (Optional, boolean)**: Should this request force synthetic _source? Use this to test if the mapping supports synthetic _source and to get a sense of the worst case performance. Fetches with this enabled will be slower than enabling synthetic source natively in the index. +- **`preference` (Optional, string)**: Specifies the node or shard the operation should be performed on. Random by default. +- **`realtime` (Optional, boolean)**: If `true`, the request is real-time as opposed to near-real-time. +- **`refresh` (Optional, boolean)**: If `true`, the request refreshes relevant shards before retrieving documents. +- **`routing` (Optional, string)**: Custom value used to route operations to a specific shard.
+- **`_source` (Optional, boolean | string | string[])**: True or false to return the `_source` field or not, or a list of fields to return. +- **`_source_excludes` (Optional, string | string[])**: A list of source fields to exclude from the response. You can also use this parameter to exclude fields from the subset specified in `_source_includes` query parameter. +- **`_source_includes` (Optional, string | string[])**: A list of source fields to include in the response. If this parameter is specified, only these source fields are returned. You can exclude fields from this subset using the `_source_excludes` query parameter. If the `_source` parameter is `false`, this parameter is ignored. +- **`stored_fields` (Optional, string | string[])**: If `true`, retrieves the document fields stored in the index rather than the document `_source`. +## client.msearch [_msearch] Run multiple searches. -The format of the request is similar to the bulk API format and makes use of the newline delimited JSON (NDJSON) format. The structure is as follows: +The format of the request is similar to the bulk API format and makes use of the newline delimited JSON (NDJSON) format. +The structure is as follows: ``` header\n @@ -1087,224 +1087,229 @@ body\n This structure is specifically optimized to reduce parsing if a specific search ends up redirected to another node. -::::{important} -The final line of data must end with a newline character `\n`. Each newline character may be preceded by a carriage return `\r`. When sending requests to this endpoint the `Content-Type` header should be set to `application/x-ndjson`. -:::: - +IMPORTANT: The final line of data must end with a newline character `\n`. +Each newline character may be preceded by a carriage return `\r`. +When sending requests to this endpoint the `Content-Type` header should be set to `application/x-ndjson`. -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-msearch) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-msearch) ```ts client.msearch({ ... }) ``` +### Arguments [_arguments_msearch] + +#### Request (object) [_request_msearch] + +- **`index` (Optional, string | string[])**: List of data streams, indices, and index aliases to search. +- **`searches` (Optional, { allow_no_indices, expand_wildcards, ignore_unavailable, index, preference, request_cache, routing, search_type, ccs_minimize_roundtrips, allow_partial_search_results, ignore_throttled } | { aggregations, collapse, explain, ext, from, highlight, track_total_hits, indices_boost, docvalue_fields, knn, rank, min_score, post_filter, profile, query, rescore, retriever, script_fields, search_after, size, slice, sort, _source, fields, suggest, terminate_after, timeout, track_scores, version, seq_no_primary_term, stored_fields, pit, runtime_mappings, stats }[])** +- **`allow_no_indices` (Optional, boolean)**: If false, the request returns an error if any wildcard expression, index alias, or _all value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting foo*,bar* returns an error if an index starts with foo but no index starts with bar. +- **`ccs_minimize_roundtrips` (Optional, boolean)**: If true, network roundtrips between the coordinating node and remote clusters are minimized for cross-cluster search requests. 
+- **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Type of index that wildcard expressions can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. +- **`ignore_throttled` (Optional, boolean)**: If true, concrete, expanded or aliased indices are ignored when frozen. +- **`ignore_unavailable` (Optional, boolean)**: If true, missing or closed indices are not included in the response. +- **`include_named_queries_score` (Optional, boolean)**: Indicates whether hit.matched_queries should be rendered as a map that includes the name of the matched query associated with its score (true) or as an array containing the name of the matched queries (false) This functionality reruns each named query on every hit in a search response. Typically, this adds a small overhead to a request. However, using computationally expensive named queries on a large number of hits may add significant overhead. +- **`max_concurrent_searches` (Optional, number)**: Maximum number of concurrent searches the multi search API can execute. Defaults to `max(1, (# of data nodes * min(search thread pool size, 10)))`. +- **`max_concurrent_shard_requests` (Optional, number)**: Maximum number of concurrent shard requests that each sub-search request executes per node. +- **`pre_filter_shard_size` (Optional, number)**: Defines a threshold that enforces a pre-filter roundtrip to prefilter search shards based on query rewriting if the number of shards the search request expands to exceeds the threshold. This filter roundtrip can limit the number of shards significantly if for instance a shard can not match any documents based on its rewrite method i.e., if date filters are mandatory to match but the shard bounds and the query are disjoint. +- **`rest_total_hits_as_int` (Optional, boolean)**: If true, hits.total are returned as an integer in the response. Defaults to false, which returns an object. +- **`routing` (Optional, string)**: Custom routing value used to route search operations to a specific shard. +- **`search_type` (Optional, Enum("query_then_fetch" | "dfs_query_then_fetch"))**: Indicates whether global term and document frequencies should be used when scoring returned documents. +- **`typed_keys` (Optional, boolean)**: Specifies whether aggregation and suggester names should be prefixed by their respective types in the response. + +## client.msearchTemplate [_msearch_template] +Run multiple templated searches. +Run multiple templated searches with a single request. +If you are providing a text file or text input to `curl`, use the `--data-binary` flag instead of `-d` to preserve newlines. +For example: -### Arguments [_arguments_21] - -* **Request (object):** - - * **`index` (Optional, string | string[])**: List of data streams, indices, and index aliases to search. 
- * **`searches` (Optional, { allow_no_indices, expand_wildcards, ignore_unavailable, index, preference, request_cache, routing, search_type, ccs_minimize_roundtrips, allow_partial_search_results, ignore_throttled } | { aggregations, collapse, query, explain, ext, stored_fields, docvalue_fields, knn, from, highlight, indices_boost, min_score, post_filter, profile, rescore, script_fields, search_after, size, sort, _source, fields, terminate_after, stats, timeout, track_scores, track_total_hits, version, runtime_mappings, seq_no_primary_term, pit, suggest }[])** - * **`allow_no_indices` (Optional, boolean)**: If false, the request returns an error if any wildcard expression, index alias, or _all value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting foo*,bar* returns an error if an index starts with foo but no index starts with bar. - * **`ccs_minimize_roundtrips` (Optional, boolean)**: If true, network roundtrips between the coordinating node and remote clusters are minimized for cross-cluster search requests. - * **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Type of index that wildcard expressions can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. - * **`ignore_throttled` (Optional, boolean)**: If true, concrete, expanded or aliased indices are ignored when frozen. - * **`ignore_unavailable` (Optional, boolean)**: If true, missing or closed indices are not included in the response. - * **`include_named_queries_score` (Optional, boolean)**: Indicates whether hit.matched_queries should be rendered as a map that includes the name of the matched query associated with its score (true) or as an array containing the name of the matched queries (false) This functionality reruns each named query on every hit in a search response. Typically, this adds a small overhead to a request. However, using computationally expensive named queries on a large number of hits may add significant overhead. - * **`max_concurrent_searches` (Optional, number)**: Maximum number of concurrent searches the multi search API can execute. - * **`max_concurrent_shard_requests` (Optional, number)**: Maximum number of concurrent shard requests that each sub-search request executes per node. - * **`pre_filter_shard_size` (Optional, number)**: Defines a threshold that enforces a pre-filter roundtrip to prefilter search shards based on query rewriting if the number of shards the search request expands to exceeds the threshold. This filter roundtrip can limit the number of shards significantly if for instance a shard can not match any documents based on its rewrite method i.e., if date filters are mandatory to match but the shard bounds and the query are disjoint. - * **`rest_total_hits_as_int` (Optional, boolean)**: If true, hits.total are returned as an integer in the response. Defaults to false, which returns an object. - * **`routing` (Optional, string)**: Custom routing value used to route search operations to a specific shard. - * **`search_type` (Optional, Enum("query_then_fetch" | "dfs_query_then_fetch"))**: Indicates whether global term and document frequencies should be used when scoring returned documents. 
- * **`typed_keys` (Optional, boolean)**: Specifies whether aggregation and suggester names should be prefixed by their respective types in the response. - - - -## msearch_template [_msearch_template] +``` +$ cat requests +{ "index": "my-index" } +{ "id": "my-search-template", "params": { "query_string": "hello world", "from": 0, "size": 10 }} +{ "index": "my-other-index" } +{ "id": "my-other-search-template", "params": { "query_type": "match_all" }} -Run multiple templated searches. +$ curl -H "Content-Type: application/x-ndjson" -XGET localhost:9200/_msearch/template --data-binary "@requests"; echo +``` -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-msearch) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-msearch-template) ```ts client.msearchTemplate({ ... }) ``` +### Arguments [_arguments_msearch_template] +#### Request (object) [_request_msearch_template] -### Arguments [_arguments_22] - -* **Request (object):** - - * **`index` (Optional, string | string[])**: List of data streams, indices, and aliases to search. Supports wildcards (`*`). To search all data streams and indices, omit this parameter or use `*`. - * **`search_templates` (Optional, { allow_no_indices, expand_wildcards, ignore_unavailable, index, preference, request_cache, routing, search_type, ccs_minimize_roundtrips, allow_partial_search_results, ignore_throttled } | { aggregations, collapse, query, explain, ext, stored_fields, docvalue_fields, knn, from, highlight, indices_boost, min_score, post_filter, profile, rescore, script_fields, search_after, size, sort, _source, fields, terminate_after, stats, timeout, track_scores, track_total_hits, version, runtime_mappings, seq_no_primary_term, pit, suggest }[])** - * **`ccs_minimize_roundtrips` (Optional, boolean)**: If `true`, network round-trips are minimized for cross-cluster search requests. - * **`max_concurrent_searches` (Optional, number)**: Maximum number of concurrent searches the API can run. - * **`search_type` (Optional, Enum("query_then_fetch" | "dfs_query_then_fetch"))**: The type of the search operation. Available options: `query_then_fetch`, `dfs_query_then_fetch`. - * **`rest_total_hits_as_int` (Optional, boolean)**: If `true`, the response returns `hits.total` as an integer. If `false`, it returns `hits.total` as an object. - * **`typed_keys` (Optional, boolean)**: If `true`, the response prefixes aggregation and suggester names with their respective types. - +- **`index` (Optional, string | string[])**: A list of data streams, indices, and aliases to search. It supports wildcards (`*`). To search all data streams and indices, omit this parameter or use `*`. +- **`search_templates` (Optional, { allow_no_indices, expand_wildcards, ignore_unavailable, index, preference, request_cache, routing, search_type, ccs_minimize_roundtrips, allow_partial_search_results, ignore_throttled } | { aggregations, collapse, explain, ext, from, highlight, track_total_hits, indices_boost, docvalue_fields, knn, rank, min_score, post_filter, profile, query, rescore, retriever, script_fields, search_after, size, slice, sort, _source, fields, suggest, terminate_after, timeout, track_scores, version, seq_no_primary_term, stored_fields, pit, runtime_mappings, stats }[])** +- **`ccs_minimize_roundtrips` (Optional, boolean)**: If `true`, network round-trips are minimized for cross-cluster search requests. 
+- **`max_concurrent_searches` (Optional, number)**: The maximum number of concurrent searches the API can run. +- **`search_type` (Optional, Enum("query_then_fetch" | "dfs_query_then_fetch"))**: The type of the search operation. +- **`rest_total_hits_as_int` (Optional, boolean)**: If `true`, the response returns `hits.total` as an integer. If `false`, it returns `hits.total` as an object. +- **`typed_keys` (Optional, boolean)**: If `true`, the response prefixes aggregation and suggester names with their respective types. +## client.mtermvectors [_mtermvectors] +Get multiple term vectors. -## mtermvectors [_mtermvectors] +Get multiple term vectors with a single request. +You can specify existing documents by index and ID or provide artificial documents in the body of the request. +You can specify the index in the request body or request URI. +The response contains a `docs` array with all the fetched termvectors. +Each element has the structure provided by the termvectors API. -Get multiple term vectors. +**Artificial documents** -You can specify existing documents by index and ID or provide artificial documents in the body of the request. You can specify the index in the request body or request URI. The response contains a `docs` array with all the fetched termvectors. Each element has the structure provided by the termvectors API. +You can also use `mtermvectors` to generate term vectors for artificial documents provided in the body of the request. +The mapping used is determined by the specified `_index`. -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-mtermvectors) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-mtermvectors) ```ts client.mtermvectors({ ... }) ``` - - -### Arguments [_arguments_23] - -* **Request (object):** - - * **`index` (Optional, string)**: Name of the index that contains the documents. - * **`docs` (Optional, { _id, _index, routing, _source, stored_fields, version, version_type }[])**: Array of existing or artificial documents. - * **`ids` (Optional, string[])**: Simplified syntax to specify documents by their ID if they’re in the same index. - * **`fields` (Optional, string | string[])**: List or wildcard expressions of fields to include in the statistics. Used as the default list unless a specific field list is provided in the `completion_fields` or `fielddata_fields` parameters. - * **`field_statistics` (Optional, boolean)**: If `true`, the response includes the document count, sum of document frequencies, and sum of total term frequencies. - * **`offsets` (Optional, boolean)**: If `true`, the response includes term offsets. - * **`payloads` (Optional, boolean)**: If `true`, the response includes term payloads. - * **`positions` (Optional, boolean)**: If `true`, the response includes term positions. - * **`preference` (Optional, string)**: Specifies the node or shard the operation should be performed on. Random by default. - * **`realtime` (Optional, boolean)**: If true, the request is real-time as opposed to near-real-time. - * **`routing` (Optional, string)**: Custom value used to route operations to a specific shard. - * **`term_statistics` (Optional, boolean)**: If true, the response includes term frequency and document frequency. - * **`version` (Optional, number)**: If `true`, returns the document version as part of a hit. - * **`version_type` (Optional, Enum("internal" | "external" | "external_gte" | "force"))**: Specific version type. 
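+As a quick, hedged illustration (the index name and document IDs are made up for the example), term vectors for two existing documents can be fetched by ID in a single call:
+
+```ts
+// Fetch term vectors for two documents from the same index
+const resp = await client.mtermvectors({
+  index: 'my-index-000001',
+  ids: ['1', '2'],
+  term_statistics: true
+})
+// Each entry in resp.docs has the structure returned by the termvectors API
+console.log(resp.docs.length)
+```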
- - - -## open_point_in_time [_open_point_in_time] - +### Arguments [_arguments_mtermvectors] + +#### Request (object) [_request_mtermvectors] + +- **`index` (Optional, string)**: The name of the index that contains the documents. +- **`docs` (Optional, { _id, _index, routing, _source, stored_fields, version, version_type }[])**: An array of existing or artificial documents. +- **`ids` (Optional, string[])**: A simplified syntax to specify documents by their ID if they're in the same index. +- **`fields` (Optional, string | string[])**: A list or wildcard expressions of fields to include in the statistics. It is used as the default list unless a specific field list is provided in the `completion_fields` or `fielddata_fields` parameters. +- **`field_statistics` (Optional, boolean)**: If `true`, the response includes the document count, sum of document frequencies, and sum of total term frequencies. +- **`offsets` (Optional, boolean)**: If `true`, the response includes term offsets. +- **`payloads` (Optional, boolean)**: If `true`, the response includes term payloads. +- **`positions` (Optional, boolean)**: If `true`, the response includes term positions. +- **`preference` (Optional, string)**: The node or shard the operation should be performed on. It is random by default. +- **`realtime` (Optional, boolean)**: If true, the request is real-time as opposed to near-real-time. +- **`routing` (Optional, string)**: A custom value used to route operations to a specific shard. +- **`term_statistics` (Optional, boolean)**: If true, the response includes term frequency and document frequency. +- **`version` (Optional, number)**: If `true`, returns the document version as part of a hit. +- **`version_type` (Optional, Enum("internal" | "external" | "external_gte" | "force"))**: The version type. + +## client.openPointInTime [_open_point_in_time] Open a point in time. -A search request by default runs against the most recent visible data of the target indices, which is called point in time. Elasticsearch pit (point in time) is a lightweight view into the state of the data as it existed when initiated. In some cases, it’s preferred to perform multiple search requests using the same point in time. For example, if refreshes happen between `search_after` requests, then the results of those requests might not be consistent as changes happening between searches are only visible to the more recent point in time. +A search request by default runs against the most recent visible data of the target indices, +which is called point in time. Elasticsearch pit (point in time) is a lightweight view into the +state of the data as it existed when initiated. In some cases, it’s preferred to perform multiple +search requests using the same point in time. For example, if refreshes happen between +`search_after` requests, then the results of those requests might not be consistent as changes happening +between searches are only visible to the more recent point in time. A point in time must be opened explicitly before being used in search requests. A subsequent search request with the `pit` parameter must not specify `index`, `routing`, or `preference` values as these parameters are copied from the point in time. -Just like regular searches, you can use `from` and `size` to page through point in time search results, up to the first 10,000 hits. If you want to retrieve more hits, use PIT with `search_after`. 
+Just like regular searches, you can use `from` and `size` to page through point in time search results, up to the first 10,000 hits. +If you want to retrieve more hits, use PIT with `search_after`. -::::{important} -The open point in time request and each subsequent search request can return different identifiers; always use the most recently received ID for the next search request. -:::: +IMPORTANT: The open point in time request and each subsequent search request can return different identifiers; always use the most recently received ID for the next search request. - -When a PIT that contains shard failures is used in a search request, the missing are always reported in the search response as a `NoShardAvailableActionException` exception. To get rid of these exceptions, a new PIT needs to be created so that shards missing from the previous PIT can be handled, assuming they become available in the meantime. +When a PIT that contains shard failures is used in a search request, the missing shards are always reported in the search response as a `NoShardAvailableActionException` exception. +To get rid of these exceptions, a new PIT needs to be created so that shards missing from the previous PIT can be handled, assuming they become available in the meantime. **Keeping point in time alive** -The `keep_alive` parameter, which is passed to a open point in time request and search request, extends the time to live of the corresponding point in time. The value does not need to be long enough to process all data — it just needs to be long enough for the next request. - -Normally, the background merge process optimizes the index by merging together smaller segments to create new, bigger segments. Once the smaller segments are no longer needed they are deleted. However, open point-in-times prevent the old segments from being deleted since they are still in use. +The `keep_alive` parameter, which is passed to an open point in time request and search request, extends the time to live of the corresponding point in time. +The value does not need to be long enough to process all data — it just needs to be long enough for the next request. -::::{tip} -Keeping older segments alive means that more disk space and file handles are needed. Ensure that you have configured your nodes to have ample free file handles. -:::: +Normally, the background merge process optimizes the index by merging together smaller segments to create new, bigger segments. +Once the smaller segments are no longer needed they are deleted. +However, open point-in-times prevent the old segments from being deleted since they are still in use. +TIP: Keeping older segments alive means that more disk space and file handles are needed. +Ensure that you have configured your nodes to have ample free file handles. -Additionally, if a segment contains deleted or updated documents then the point in time must keep track of whether each document in the segment was live at the time of the initial search request. Ensure that your nodes have sufficient heap space if you have many open point-in-times on an index that is subject to ongoing deletes or updates. Note that a point-in-time doesn’t prevent its associated indices from being deleted. You can check how many point-in-times (that is, search contexts) are open with the nodes stats API. +Additionally, if a segment contains deleted or updated documents then the point in time must keep track of whether each document in the segment was live at the time of the initial search request.
+Ensure that your nodes have sufficient heap space if you have many open point-in-times on an index that is subject to ongoing deletes or updates. +Note that a point-in-time doesn't prevent its associated indices from being deleted. +You can check how many point-in-times (that is, search contexts) are open with the nodes stats API. -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-open-point-in-time) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-open-point-in-time) ```ts client.openPointInTime({ index, keep_alive }) ``` +### Arguments [_arguments_open_point_in_time] +#### Request (object) [_request_open_point_in_time] -### Arguments [_arguments_24] - -* **Request (object):** - - * **`index` (string | string[])**: A list of index names to open point in time; use `_all` or empty string to perform the operation on all indices - * **`keep_alive` (string | -1 | 0)**: Extend the length of time that the point in time persists. - * **`index_filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })**: Filter indices if the provided query rewrites to `match_none` on every shard. - * **`ignore_unavailable` (Optional, boolean)**: If `false`, the request returns an error if it targets a missing or closed index. - * **`preference` (Optional, string)**: The node or shard the operation should be performed on. By default, it is random. - * **`routing` (Optional, string)**: A custom value that is used to route operations to a specific shard. - * **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: The type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. It supports a list of values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. - * **`allow_partial_search_results` (Optional, boolean)**: Indicates whether the point in time tolerates unavailable shards or shard failures when initially creating the PIT. If `false`, creating a point in time request when a shard is missing or unavailable will throw an exception. If `true`, the point in time will contain all the shards that are available at the time of the request. +- **`index` (string | string[])**: A list of index names to open point in time; use `_all` or empty string to perform the operation on all indices +- **`keep_alive` (string | -1 | 0)**: Extend the length of time that the point in time persists. 
+- **`index_filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })**: Filter indices if the provided query rewrites to `match_none` on every shard. +- **`ignore_unavailable` (Optional, boolean)**: If `false`, the request returns an error if it targets a missing or closed index. +- **`preference` (Optional, string)**: The node or shard the operation should be performed on. By default, it is random. +- **`routing` (Optional, string)**: A custom value that is used to route operations to a specific shard. +- **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: The type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. It supports a list of values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. +- **`allow_partial_search_results` (Optional, boolean)**: Indicates whether the point in time tolerates unavailable shards or shard failures when initially creating the PIT. If `false`, creating a point in time request when a shard is missing or unavailable will throw an exception. If `true`, the point in time will contain all the shards that are available at the time of the request. +- **`max_concurrent_shard_requests` (Optional, number)**: Maximum number of concurrent shard requests that each sub-search request executes per node. +## client.ping [_ping] +Ping the cluster. +Get information about whether the cluster is running. - -## ping [_ping] - -Ping the cluster. Get information about whether the cluster is running. - -[Endpoint documentation](docs-content://get-started/index.md) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/group/endpoint-cluster) ```ts client.ping() ``` +## client.putScript [_put_script] +Create or update a script or search template. +Creates or updates a stored script or search template. -## put_script [_put_script] - -Create or update a script or search template. Creates or updates a stored script or search template. - -[Endpoint documentation](docs-content://explore-analyze/scripting.md) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-put-script) ```ts client.putScript({ id, script }) ``` +### Arguments [_arguments_put_script] +#### Request (object) [_request_put_script] -### Arguments [_arguments_25] - -* **Request (object):** - - * **`id` (string)**: Identifier for the stored script or search template. Must be unique within the cluster. - * **`script` ({ lang, options, source })**: Contains the script or search template, its parameters, and its language. - * **`context` (Optional, string)**: Context in which the script or search template should run. 
To prevent errors, the API immediately compiles the script or template in this context. - * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. - * **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. - - - -## rank_eval [_rank_eval] +- **`id` (string)**: The identifier for the stored script or search template. It must be unique within the cluster. +- **`script` ({ lang, options, source })**: The script or search template, its parameters, and its language. +- **`context` (Optional, string)**: The context in which the script or search template should run. To prevent errors, the API immediately compiles the script or template in this context. +- **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. It can also be set to `-1` to indicate that the request should never timeout. +- **`timeout` (Optional, string | -1 | 0)**: The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. It can also be set to `-1` to indicate that the request should never timeout. +## client.rankEval [_rank_eval] Evaluate ranked search results. Evaluate the quality of ranked search results over a set of typical search queries. -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-rank-eval) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-rank-eval) ```ts client.rankEval({ requests }) ``` +### Arguments [_arguments_rank_eval] +#### Request (object) [_request_rank_eval] -### Arguments [_arguments_26] - -* **Request (object):** - - * **`requests` ({ id, request, ratings, template_id, params }[])**: A set of typical search requests, together with their provided ratings. - * **`index` (Optional, string | string[])**: List of data streams, indices, and index aliases used to limit the request. Wildcard (`*`) expressions are supported. To target all data streams and indices in a cluster, omit this parameter or use `_all` or `*`. - * **`metric` (Optional, { precision, recall, mean_reciprocal_rank, dcg, expected_reciprocal_rank })**: Definition of the evaluation metric to calculate. - * **`allow_no_indices` (Optional, boolean)**: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. - * **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Whether to expand wildcard expression to concrete indices that are open, closed or both. - * **`ignore_unavailable` (Optional, boolean)**: If `true`, missing or closed indices are not included in the response. - * **`search_type` (Optional, string)**: Search operation type - - - -## reindex [_reindex] +- **`requests` ({ id, request, ratings, template_id, params }[])**: A set of typical search requests, together with their provided ratings. 
+- **`index` (Optional, string | string[])**: A list of data streams, indices, and index aliases used to limit the request. Wildcard (`*`) expressions are supported. To target all data streams and indices in a cluster, omit this parameter or use `_all` or `*`. +- **`metric` (Optional, { precision, recall, mean_reciprocal_rank, dcg, expected_reciprocal_rank })**: Definition of the evaluation metric to calculate. +- **`allow_no_indices` (Optional, boolean)**: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. +- **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Whether to expand wildcard expression to concrete indices that are open, closed or both. +- **`ignore_unavailable` (Optional, boolean)**: If `true`, missing or closed indices are not included in the response. +- **`search_type` (Optional, string)**: Search operation type +## client.reindex [_reindex] Reindex documents. -Copy documents from a source to a destination. You can copy all documents to the destination index or reindex a subset of the documents. The source can be any existing index, alias, or data stream. The destination must differ from the source. For example, you cannot reindex a data stream into itself. - -::::{important} -Reindex requires `_source` to be enabled for all documents in the source. The destination should be configured as wanted before calling the reindex API. Reindex does not copy the settings from the source or its associated template. Mappings, shard counts, and replicas, for example, must be configured ahead of time. -:::: +Copy documents from a source to a destination. +You can copy all documents to the destination index or reindex a subset of the documents. +The source can be any existing index, alias, or data stream. +The destination must differ from the source. +For example, you cannot reindex a data stream into itself. +IMPORTANT: Reindex requires `_source` to be enabled for all documents in the source. +The destination should be configured as wanted before calling the reindex API. +Reindex does not copy the settings from the source or its associated template. +Mappings, shard counts, and replicas, for example, must be configured ahead of time. If the Elasticsearch security features are enabled, you must have the following security privileges: @@ -1313,33 +1318,41 @@ If the Elasticsearch security features are enabled, you must have the following * To automatically create a data stream or index with a reindex API request, you must have the `auto_configure`, `create_index`, or `manage` index privilege for the destination data stream, index, or alias. * If reindexing from a remote cluster, the `source.remote.user` must have the `monitor` cluster privilege and the `read` index privilege for the source data stream, index, or alias. -If reindexing from a remote cluster, you must explicitly allow the remote host in the `reindex.remote.whitelist` setting. Automatic data stream creation requires a matching index template with data stream enabled. +If reindexing from a remote cluster, you must explicitly allow the remote host in the `reindex.remote.whitelist` setting. 
+Automatic data stream creation requires a matching index template with data stream enabled. -The `dest` element can be configured like the index API to control optimistic concurrency control. Omitting `version_type` or setting it to `internal` causes Elasticsearch to blindly dump documents into the destination, overwriting any that happen to have the same ID. +The `dest` element can be configured like the index API to control optimistic concurrency control. +Omitting `version_type` or setting it to `internal` causes Elasticsearch to blindly dump documents into the destination, overwriting any that happen to have the same ID. Setting `version_type` to `external` causes Elasticsearch to preserve the `version` from the source, create any documents that are missing, and update any documents that have an older version in the destination than they do in the source. -Setting `op_type` to `create` causes the reindex API to create only missing documents in the destination. All existing documents will cause a version conflict. - -::::{important} -Because data streams are append-only, any reindex request to a destination data stream must have an `op_type` of `create`. A reindex can only add new documents to a destination data stream. It cannot update existing documents in a destination data stream. -:::: +Setting `op_type` to `create` causes the reindex API to create only missing documents in the destination. +All existing documents will cause a version conflict. +IMPORTANT: Because data streams are append-only, any reindex request to a destination data stream must have an `op_type` of `create`. +A reindex can only add new documents to a destination data stream. +It cannot update existing documents in a destination data stream. -By default, version conflicts abort the reindex process. To continue reindexing if there are conflicts, set the `conflicts` request body property to `proceed`. In this case, the response includes a count of the version conflicts that were encountered. Note that the handling of other error types is unaffected by the `conflicts` property. Additionally, if you opt to count version conflicts, the operation could attempt to reindex more documents from the source than `max_docs` until it has successfully indexed `max_docs` documents into the target or it has gone through every document in the source query. - -::::{note} -The reindex API makes no effort to handle ID collisions. The last document written will "win" but the order isn’t usually predictable so it is not a good idea to rely on this behavior. Instead, make sure that IDs are unique by using a script. -:::: +By default, version conflicts abort the reindex process. +To continue reindexing if there are conflicts, set the `conflicts` request body property to `proceed`. +In this case, the response includes a count of the version conflicts that were encountered. +Note that the handling of other error types is unaffected by the `conflicts` property. +Additionally, if you opt to count version conflicts, the operation could attempt to reindex more documents from the source than `max_docs` until it has successfully indexed `max_docs` documents into the target or it has gone through every document in the source query. +NOTE: The reindex API makes no effort to handle ID collisions. +The last document written will "win" but the order isn't usually predictable so it is not a good idea to rely on this behavior. +Instead, make sure that IDs are unique by using a script. 
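As a rough sketch (the index names are placeholders, not part of the original documentation), a reindex call that only creates missing documents in the destination and keeps going when version conflicts occur might look like this:

```ts
// Hypothetical index names. `op_type: 'create'` writes only documents missing
// from the destination, and `conflicts: 'proceed'` counts version conflicts
// instead of aborting the operation.
const response = await client.reindex({
  source: { index: 'my-source-index' },
  dest: { index: 'my-dest-index', op_type: 'create' },
  conflicts: 'proceed'
})
console.log(response.created, response.version_conflicts)
```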
**Running reindex asynchronously** -If the request contains `wait_for_completion=false`, Elasticsearch performs some preflight checks, launches the request, and returns a task you can use to cancel or get the status of the task. Elasticsearch creates a record of this task as a document at `_tasks/`. +If the request contains `wait_for_completion=false`, Elasticsearch performs some preflight checks, launches the request, and returns a task you can use to cancel or get the status of the task. +Elasticsearch creates a record of this task as a document at `_tasks/`. **Reindex from multiple sources** -If you have many sources to reindex it is generally better to reindex them one at a time rather than using a glob pattern to pick up multiple sources. That way you can resume the process if there are any errors by removing the partially completed source and starting over. It also makes parallelizing the process fairly simple: split the list of sources to reindex and run each list in parallel. +If you have many sources to reindex it is generally better to reindex them one at a time rather than using a glob pattern to pick up multiple sources. +That way you can resume the process if there are any errors by removing the partially completed source and starting over. +It also makes parallelizing the process fairly simple: split the list of sources to reindex and run each list in parallel. For example, you can use a bash script like this: @@ -1358,27 +1371,32 @@ done **Throttling** -Set `requests_per_second` to any positive decimal number (`1.4`, `6`, `1000`, for example) to throttle the rate at which reindex issues batches of index operations. Requests are throttled by padding each batch with a wait time. To turn off throttling, set `requests_per_second` to `-1`. +Set `requests_per_second` to any positive decimal number (`1.4`, `6`, `1000`, for example) to throttle the rate at which reindex issues batches of index operations. +Requests are throttled by padding each batch with a wait time. +To turn off throttling, set `requests_per_second` to `-1`. -The throttling is done by waiting between batches so that the scroll that reindex uses internally can be given a timeout that takes into account the padding. The padding time is the difference between the batch size divided by the `requests_per_second` and the time spent writing. By default the batch size is `1000`, so if `requests_per_second` is set to `500`: +The throttling is done by waiting between batches so that the scroll that reindex uses internally can be given a timeout that takes into account the padding. +The padding time is the difference between the batch size divided by the `requests_per_second` and the time spent writing. +By default the batch size is `1000`, so if `requests_per_second` is set to `500`: ``` target_time = 1000 / 500 per second = 2 seconds wait_time = target_time - write_time = 2 seconds - .5 seconds = 1.5 seconds ``` -Since the batch is issued as a single bulk request, large batch sizes cause Elasticsearch to create many requests and then wait for a while before starting the next set. This is "bursty" instead of "smooth". +Since the batch is issued as a single bulk request, large batch sizes cause Elasticsearch to create many requests and then wait for a while before starting the next set. +This is "bursty" instead of "smooth". **Slicing** -Reindex supports sliced scroll to parallelize the reindexing process. This parallelization can improve efficiency and provide a convenient way to break the request down into smaller parts. 
- -::::{note} -Reindexing from remote clusters does not support manual or automatic slicing. -:::: +Reindex supports sliced scroll to parallelize the reindexing process. +This parallelization can improve efficiency and provide a convenient way to break the request down into smaller parts. +NOTE: Reindexing from remote clusters does not support manual or automatic slicing. -You can slice a reindex request manually by providing a slice ID and total number of slices to each request. You can also let reindex automatically parallelize by using sliced scroll to slice on `_id`. The `slices` parameter specifies the number of slices to use. +You can slice a reindex request manually by providing a slice ID and total number of slices to each request. +You can also let reindex automatically parallelize by using sliced scroll to slice on `_id`. +The `slices` parameter specifies the number of slices to use. Adding `slices` to the reindex request just automates the manual process, creating sub-requests which means it has some quirks: @@ -1387,13 +1405,16 @@ Adding `slices` to the reindex request just automates the manual process, creati * These sub-requests are individually addressable for things like cancellation and rethrottling. * Rethrottling the request with `slices` will rethrottle the unfinished sub-request proportionally. * Canceling the request with `slices` will cancel each sub-request. -* Due to the nature of `slices`, each sub-request won’t get a perfectly even portion of the documents. All documents will be addressed, but some slices may be larger than others. Expect larger slices to have a more even distribution. +* Due to the nature of `slices`, each sub-request won't get a perfectly even portion of the documents. All documents will be addressed, but some slices may be larger than others. Expect larger slices to have a more even distribution. * Parameters like `requests_per_second` and `max_docs` on a request with `slices` are distributed proportionally to each sub-request. Combine that with the previous point about distribution being uneven and you should conclude that using `max_docs` with `slices` might not result in exactly `max_docs` documents being reindexed. * Each sub-request gets a slightly different snapshot of the source, though these are all taken at approximately the same time. -If slicing automatically, setting `slices` to `auto` will choose a reasonable number for most indices. If slicing manually or otherwise tuning automatic slicing, use the following guidelines. +If slicing automatically, setting `slices` to `auto` will choose a reasonable number for most indices. +If slicing manually or otherwise tuning automatic slicing, use the following guidelines. -Query performance is most efficient when the number of slices is equal to the number of shards in the index. If that number is large (for example, `500`), choose a lower number as too many slices will hurt performance. Setting slices higher than the number of shards generally does not improve efficiency and adds overhead. +Query performance is most efficient when the number of slices is equal to the number of shards in the index. +If that number is large (for example, `500`), choose a lower number as too many slices will hurt performance. +Setting slices higher than the number of shards generally does not improve efficiency and adds overhead. Indexing performance scales linearly across available resources with the number of slices. 
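As an illustrative sketch (index names are placeholders), automatic slicing can be requested with `slices: 'auto'`; combining it with `wait_for_completion: false` runs the reindex as a task, as described under "Running reindex asynchronously" above:

```ts
// Let Elasticsearch choose a reasonable number of slices and run the operation
// in the background; the returned task ID can be used with the tasks API to
// monitor, rethrottle, or cancel the reindex.
const { task } = await client.reindex({
  source: { index: 'my-source-index' },
  dest: { index: 'my-dest-index' },
  slices: 'auto',
  wait_for_completion: false
})
console.log(task)
```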
@@ -1401,9 +1422,14 @@ Whether query or indexing performance dominates the runtime depends on the docum **Modify documents during reindexing** -Like `_update_by_query`, reindex operations support a script that modifies the document. Unlike `_update_by_query`, the script is allowed to modify the document’s metadata. +Like `_update_by_query`, reindex operations support a script that modifies the document. +Unlike `_update_by_query`, the script is allowed to modify the document's metadata. -Just as in `_update_by_query`, you can set `ctx.op` to change the operation that is run on the destination. For example, set `ctx.op` to `noop` if your script decides that the document doesn’t have to be indexed in the destination. This "no operation" will be reported in the `noop` counter in the response body. Set `ctx.op` to `delete` if your script decides that the document must be deleted from the destination. The deletion will be reported in the `deleted` counter in the response body. Setting `ctx.op` to anything else will return an error, as will setting any other field in `ctx`. +Just as in `_update_by_query`, you can set `ctx.op` to change the operation that is run on the destination. +For example, set `ctx.op` to `noop` if your script decides that the document doesn’t have to be indexed in the destination. This "no operation" will be reported in the `noop` counter in the response body. +Set `ctx.op` to `delete` if your script decides that the document must be deleted from the destination. +The deletion will be reported in the `deleted` counter in the response body. +Setting `ctx.op` to anything else will return an error, as will setting any other field in `ctx`. Think of the possibilities! Just be careful; you are able to change: @@ -1412,460 +1438,598 @@ Think of the possibilities! Just be careful; you are able to change: * `_version` * `_routing` -Setting `_version` to `null` or clearing it from the `ctx` map is just like not sending the version in an indexing request. It will cause the document to be overwritten in the destination regardless of the version on the target or the version type you use in the reindex API. +Setting `_version` to `null` or clearing it from the `ctx` map is just like not sending the version in an indexing request. +It will cause the document to be overwritten in the destination regardless of the version on the target or the version type you use in the reindex API. **Reindex from remote** -Reindex supports reindexing from a remote Elasticsearch cluster. The `host` parameter must contain a scheme, host, port, and optional path. The `username` and `password` parameters are optional and when they are present the reindex operation will connect to the remote Elasticsearch node using basic authentication. Be sure to use HTTPS when using basic authentication or the password will be sent in plain text. There are a range of settings available to configure the behavior of the HTTPS connection. +Reindex supports reindexing from a remote Elasticsearch cluster. +The `host` parameter must contain a scheme, host, port, and optional path. +The `username` and `password` parameters are optional and when they are present the reindex operation will connect to the remote Elasticsearch node using basic authentication. +Be sure to use HTTPS when using basic authentication or the password will be sent in plain text. +There are a range of settings available to configure the behavior of the HTTPS connection. 
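A minimal sketch of a remote reindex, assuming a placeholder HTTPS host that is listed in `reindex.remote.whitelist` and placeholder credentials:

```ts
// The remote host, credentials, and index names below are illustrative only.
await client.reindex({
  source: {
    remote: {
      host: 'https://otherhost:9200/',
      username: 'user',
      password: 'pass'
    },
    index: 'my-remote-index',
    query: { match_all: {} }
  },
  dest: { index: 'my-local-index' }
})
```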
-When using Elastic Cloud, it is also possible to authenticate against the remote cluster through the use of a valid API key. Remote hosts must be explicitly allowed with the `reindex.remote.whitelist` setting. It can be set to a comma delimited list of allowed remote host and port combinations. Scheme is ignored; only the host and port are used. For example: +When using Elastic Cloud, it is also possible to authenticate against the remote cluster through the use of a valid API key. +Remote hosts must be explicitly allowed with the `reindex.remote.whitelist` setting. +It can be set to a comma delimited list of allowed remote host and port combinations. +Scheme is ignored; only the host and port are used. +For example: ``` reindex.remote.whitelist: [otherhost:9200, another:9200, 127.0.10.*:9200, localhost:*"] ``` -The list of allowed hosts must be configured on any nodes that will coordinate the reindex. This feature should work with remote clusters of any version of Elasticsearch. This should enable you to upgrade from any version of Elasticsearch to the current version by reindexing from a cluster of the old version. - -::::{warning} -Elasticsearch does not support forward compatibility across major versions. For example, you cannot reindex from a 7.x cluster into a 6.x cluster. -:::: +The list of allowed hosts must be configured on any nodes that will coordinate the reindex. +This feature should work with remote clusters of any version of Elasticsearch. +This should enable you to upgrade from any version of Elasticsearch to the current version by reindexing from a cluster of the old version. +WARNING: Elasticsearch does not support forward compatibility across major versions. +For example, you cannot reindex from a 7.x cluster into a 6.x cluster. To enable queries sent to older versions of Elasticsearch, the `query` parameter is sent directly to the remote host without validation or modification. -::::{note} -Reindexing from remote clusters does not support manual or automatic slicing. -:::: +NOTE: Reindexing from remote clusters does not support manual or automatic slicing. - -Reindexing from a remote server uses an on-heap buffer that defaults to a maximum size of 100mb. If the remote index includes very large documents you’ll need to use a smaller batch size. It is also possible to set the socket read timeout on the remote connection with the `socket_timeout` field and the connection timeout with the `connect_timeout` field. Both default to 30 seconds. +Reindexing from a remote server uses an on-heap buffer that defaults to a maximum size of 100mb. +If the remote index includes very large documents you'll need to use a smaller batch size. +It is also possible to set the socket read timeout on the remote connection with the `socket_timeout` field and the connection timeout with the `connect_timeout` field. +Both default to 30 seconds. **Configuring SSL parameters** -Reindex from remote supports configurable SSL settings. These must be specified in the `elasticsearch.yml` file, with the exception of the secure settings, which you add in the Elasticsearch keystore. It is not possible to configure SSL in the body of the reindex request. +Reindex from remote supports configurable SSL settings. +These must be specified in the `elasticsearch.yml` file, with the exception of the secure settings, which you add in the Elasticsearch keystore. +It is not possible to configure SSL in the body of the reindex request. 
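For remote indices with very large documents, a sketch along these lines (the values are illustrative, not defaults) lowers the batch size and adjusts the remote timeouts mentioned above:

```ts
// `size` on the source controls the batch size (default 1000);
// `socket_timeout` and `connect_timeout` both default to 30 seconds.
await client.reindex({
  source: {
    remote: {
      host: 'https://otherhost:9200/',
      socket_timeout: '1m',
      connect_timeout: '10s'
    },
    index: 'my-remote-index',
    size: 10
  },
  dest: { index: 'my-local-index' }
})
```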
-[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-reindex) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-reindex) ```ts client.reindex({ dest, source }) ``` - - -### Arguments [_arguments_27] - -* **Request (object):** - - * **`dest` ({ index, op_type, pipeline, routing, version_type })**: The destination you are copying to. - * **`source` ({ index, query, remote, size, slice, sort, _source, runtime_mappings })**: The source you are copying from. - * **`conflicts` (Optional, Enum("abort" | "proceed"))**: Indicates whether to continue reindexing even when there are conflicts. - * **`max_docs` (Optional, number)**: The maximum number of documents to reindex. By default, all documents are reindexed. If it is a value less then or equal to `scroll_size`, a scroll will not be used to retrieve the results for the operation. If `conflicts` is set to `proceed`, the reindex operation could attempt to reindex more documents from the source than `max_docs` until it has successfully indexed `max_docs` documents into the target or it has gone through every document in the source query. - * **`script` (Optional, { source, id, params, lang, options })**: The script to run to update the document source or metadata when reindexing. - * **`size` (Optional, number)** - * **`refresh` (Optional, boolean)**: If `true`, the request refreshes affected shards to make this operation visible to search. - * **`requests_per_second` (Optional, float)**: The throttle for this request in sub-requests per second. By default, there is no throttle. - * **`scroll` (Optional, string | -1 | 0)**: The period of time that a consistent view of the index should be maintained for scrolled search. - * **`slices` (Optional, number | Enum("auto"))**: The number of slices this task should be divided into. It defaults to one slice, which means the task isn’t sliced into subtasks. Reindex supports sliced scroll to parallelize the reindexing process. This parallelization can improve efficiency and provide a convenient way to break the request down into smaller parts. NOTE: Reindexing from remote clusters does not support manual or automatic slicing. If set to `auto`, Elasticsearch chooses the number of slices to use. This setting will use one slice per shard, up to a certain limit. If there are multiple sources, it will choose the number of slices based on the index or backing index with the smallest number of shards. - * **`timeout` (Optional, string | -1 | 0)**: The period each indexing waits for automatic index creation, dynamic mapping updates, and waiting for active shards. By default, Elasticsearch waits for at least one minute before failing. The actual wait time could be longer, particularly when multiple waits occur. - * **`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))**: The number of shard copies that must be active before proceeding with the operation. Set it to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). The default value is one, which means it waits for each primary shard to be active. - * **`wait_for_completion` (Optional, boolean)**: If `true`, the request blocks until the operation is complete. - * **`require_alias` (Optional, boolean)**: If `true`, the destination must be an index alias. 
- - - -## reindex_rethrottle [_reindex_rethrottle] - +### Arguments [_arguments_reindex] + +#### Request (object) [_request_reindex] + +- **`dest` ({ index, op_type, pipeline, routing, version_type })**: The destination you are copying to. +- **`source` ({ index, query, remote, size, slice, sort, _source, runtime_mappings })**: The source you are copying from. +- **`conflicts` (Optional, Enum("abort" | "proceed"))**: Indicates whether to continue reindexing even when there are conflicts. +- **`max_docs` (Optional, number)**: The maximum number of documents to reindex. By default, all documents are reindexed. If it is a value less then or equal to `scroll_size`, a scroll will not be used to retrieve the results for the operation. If `conflicts` is set to `proceed`, the reindex operation could attempt to reindex more documents from the source than `max_docs` until it has successfully indexed `max_docs` documents into the target or it has gone through every document in the source query. +- **`script` (Optional, { source, id, params, lang, options })**: The script to run to update the document source or metadata when reindexing. +- **`size` (Optional, number)** +- **`refresh` (Optional, boolean)**: If `true`, the request refreshes affected shards to make this operation visible to search. +- **`requests_per_second` (Optional, float)**: The throttle for this request in sub-requests per second. By default, there is no throttle. +- **`scroll` (Optional, string | -1 | 0)**: The period of time that a consistent view of the index should be maintained for scrolled search. +- **`slices` (Optional, number | Enum("auto"))**: The number of slices this task should be divided into. It defaults to one slice, which means the task isn't sliced into subtasks. Reindex supports sliced scroll to parallelize the reindexing process. This parallelization can improve efficiency and provide a convenient way to break the request down into smaller parts. NOTE: Reindexing from remote clusters does not support manual or automatic slicing. If set to `auto`, Elasticsearch chooses the number of slices to use. This setting will use one slice per shard, up to a certain limit. If there are multiple sources, it will choose the number of slices based on the index or backing index with the smallest number of shards. +- **`timeout` (Optional, string | -1 | 0)**: The period each indexing waits for automatic index creation, dynamic mapping updates, and waiting for active shards. By default, Elasticsearch waits for at least one minute before failing. The actual wait time could be longer, particularly when multiple waits occur. +- **`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))**: The number of shard copies that must be active before proceeding with the operation. Set it to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). The default value is one, which means it waits for each primary shard to be active. +- **`wait_for_completion` (Optional, boolean)**: If `true`, the request blocks until the operation is complete. +- **`require_alias` (Optional, boolean)**: If `true`, the destination must be an index alias. + +## client.reindexRethrottle [_reindex_rethrottle] Throttle a reindex operation. -Change the number of requests per second for a particular reindex operation. For example: +Change the number of requests per second for a particular reindex operation. 
+For example: ``` POST _reindex/r1A2WoRbTwKZ516z6NEs5A:36619/_rethrottle?requests_per_second=-1 ``` -Rethrottling that speeds up the query takes effect immediately. Rethrottling that slows down the query will take effect after completing the current batch. This behavior prevents scroll timeouts. +Rethrottling that speeds up the query takes effect immediately. +Rethrottling that slows down the query will take effect after completing the current batch. +This behavior prevents scroll timeouts. -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-reindex) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-reindex) ```ts client.reindexRethrottle({ task_id }) ``` +### Arguments [_arguments_reindex_rethrottle] +#### Request (object) [_request_reindex_rethrottle] -### Arguments [_arguments_28] - -* **Request (object):** - - * **`task_id` (string)**: The task identifier, which can be found by using the tasks API. - * **`requests_per_second` (Optional, float)**: The throttle for this request in sub-requests per second. It can be either `-1` to turn off throttling or any decimal number like `1.7` or `12` to throttle to that level. - - - -## render_search_template [_render_search_template] +- **`task_id` (string)**: The task identifier, which can be found by using the tasks API. +- **`requests_per_second` (Optional, float)**: The throttle for this request in sub-requests per second. It can be either `-1` to turn off throttling or any decimal number like `1.7` or `12` to throttle to that level. +## client.renderSearchTemplate [_render_search_template] Render a search template. Render a search template as a search request body. -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-render-search-template) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-render-search-template) ```ts client.renderSearchTemplate({ ... }) ``` +### Arguments [_arguments_render_search_template] +#### Request (object) [_request_render_search_template] -### Arguments [_arguments_29] - -* **Request (object):** - - * **`id` (Optional, string)**: ID of the search template to render. If no `source` is specified, this or the `id` request body parameter is required. - * **`file` (Optional, string)** - * **`params` (Optional, Record)**: Key-value pairs used to replace Mustache variables in the template. The key is the variable name. The value is the variable value. - * **`source` (Optional, string)**: An inline search template. Supports the same parameters as the search API’s request body. These parameters also support Mustache variables. If no `id` or `` is specified, this parameter is required. - - - -## scripts_painless_execute [_scripts_painless_execute] +- **`id` (Optional, string)**: The ID of the search template to render. If no `source` is specified, this or the `id` request body parameter is required. +- **`file` (Optional, string)** +- **`params` (Optional, Record)**: Key-value pairs used to replace Mustache variables in the template. The key is the variable name. The value is the variable value. 
+- **`source` (Optional, string | { aggregations, collapse, explain, ext, from, highlight, track_total_hits, indices_boost, docvalue_fields, knn, rank, min_score, post_filter, profile, query, rescore, retriever, script_fields, search_after, size, slice, sort, _source, fields, suggest, terminate_after, timeout, track_scores, version, seq_no_primary_term, stored_fields, pit, runtime_mappings, stats })**: An inline search template. It supports the same parameters as the search API's request body. These parameters also support Mustache variables. If no `id` or `` is specified, this parameter is required. +## client.scriptsPainlessExecute [_scripts_painless_execute] Run a script. -Runs a script and returns a result. Use this API to build and test scripts, such as when defining a script for a runtime field. This API requires very few dependencies and is especially useful if you don’t have permissions to write documents on a cluster. +Runs a script and returns a result. +Use this API to build and test scripts, such as when defining a script for a runtime field. +This API requires very few dependencies and is especially useful if you don't have permissions to write documents on a cluster. -The API uses several *contexts*, which control how scripts are run, what variables are available at runtime, and what the return type is. +The API uses several _contexts_, which control how scripts are run, what variables are available at runtime, and what the return type is. -Each context requires a script, but additional parameters depend on the context you’re using for that script. +Each context requires a script, but additional parameters depend on the context you're using for that script. -[Endpoint documentation](elasticsearch://reference/scripting-languages/painless/painless-api-examples.md) +[Endpoint documentation](https://www.elastic.co/docs/reference/scripting-languages/painless/painless-api-examples) ```ts client.scriptsPainlessExecute({ ... }) ``` +### Arguments [_arguments_scripts_painless_execute] +#### Request (object) [_request_scripts_painless_execute] -### Arguments [_arguments_30] - -* **Request (object):** - - * **`context` (Optional, Enum("painless_test" | "filter" | "score" | "boolean_field" | "date_field" | "double_field" | "geo_point_field" | "ip_field" | "keyword_field" | "long_field" | "composite_field"))**: The context that the script should run in. NOTE: Result ordering in the field contexts is not guaranteed. - * **`context_setup` (Optional, { document, index, query })**: Additional parameters for the `context`. NOTE: This parameter is required for all contexts except `painless_test`, which is the default if no value is provided for `context`. - * **`script` (Optional, { source, id, params, lang, options })**: The Painless script to run. - - - -## scroll [_scroll] +- **`context` (Optional, Enum("painless_test" | "filter" | "score" | "boolean_field" | "date_field" | "double_field" | "geo_point_field" | "ip_field" | "keyword_field" | "long_field" | "composite_field"))**: The context that the script should run in. NOTE: Result ordering in the field contexts is not guaranteed. +- **`context_setup` (Optional, { document, index, query })**: Additional parameters for the `context`. NOTE: This parameter is required for all contexts except `painless_test`, which is the default if no value is provided for `context`. +- **`script` (Optional, { source, id, params, lang, options })**: The Painless script to run. +## client.scroll [_scroll] Run a scrolling search. 
-::::{important} -The scroll API is no longer recommend for deep pagination. If you need to preserve the index state while paging through more than 10,000 hits, use the `search_after` parameter with a point in time (PIT). -:::: - +IMPORTANT: The scroll API is no longer recommended for deep pagination. If you need to preserve the index state while paging through more than 10,000 hits, use the `search_after` parameter with a point in time (PIT). -The scroll API gets large sets of results from a single scrolling search request. To get the necessary scroll ID, submit a search API request that includes an argument for the `scroll` query parameter. The `scroll` parameter indicates how long Elasticsearch should retain the search context for the request. The search response returns a scroll ID in the `_scroll_id` response body parameter. You can then use the scroll ID with the scroll API to retrieve the next batch of results for the request. If the Elasticsearch security features are enabled, the access to the results of a specific scroll ID is restricted to the user or API key that submitted the search. +The scroll API gets large sets of results from a single scrolling search request. +To get the necessary scroll ID, submit a search API request that includes an argument for the `scroll` query parameter. +The `scroll` parameter indicates how long Elasticsearch should retain the search context for the request. +The search response returns a scroll ID in the `_scroll_id` response body parameter. +You can then use the scroll ID with the scroll API to retrieve the next batch of results for the request. +If the Elasticsearch security features are enabled, the access to the results of a specific scroll ID is restricted to the user or API key that submitted the search. You can also use the scroll API to specify a new scroll parameter that extends or shortens the retention period for the search context. -::::{important} -Results from a scrolling search reflect the state of the index at the time of the initial search request. Subsequent indexing or document changes only affect later search and scroll requests. -:::: +IMPORTANT: Results from a scrolling search reflect the state of the index at the time of the initial search request. Subsequent indexing or document changes only affect later search and scroll requests. - -[Endpoint documentation](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-request-body.html) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-scroll) ```ts client.scroll({ scroll_id }) ``` +### Arguments [_arguments_scroll] +#### Request (object) [_request_scroll] -### Arguments [_arguments_31] - -* **Request (object):** - - * **`scroll_id` (string)**: Scroll ID of the search. - * **`scroll` (Optional, string | -1 | 0)**: Period to retain the search context for scrolling. - * **`rest_total_hits_as_int` (Optional, boolean)**: If true, the API response’s hit.total property is returned as an integer. If false, the API response’s hit.total property is returned as an object. - - - -## search [_search] +- **`scroll_id` (string)**: The scroll ID of the search. +- **`scroll` (Optional, string | -1 | 0)**: The period to retain the search context for scrolling. +- **`rest_total_hits_as_int` (Optional, boolean)**: If true, the API response’s hits.total property is returned as an integer. If false, the API response’s hits.total property is returned as an object. +## client.search [_search] Run a search.
-Get search hits that match the query defined in the request. You can provide search queries using the `q` query string parameter or the request body. If both are specified, only the query parameter is used. +Get search hits that match the query defined in the request. +You can provide search queries using the `q` query string parameter or the request body. +If both are specified, only the query parameter is used. -If the Elasticsearch security features are enabled, you must have the read index privilege for the target data stream, index, or alias. For cross-cluster search, refer to the documentation about configuring CCS privileges. To search a point in time (PIT) for an alias, you must have the `read` index privilege for the alias’s data streams or indices. +If the Elasticsearch security features are enabled, you must have the read index privilege for the target data stream, index, or alias. For cross-cluster search, refer to the documentation about configuring CCS privileges. +To search a point in time (PIT) for an alias, you must have the `read` index privilege for the alias's data streams or indices. **Search slicing** -When paging through a large number of documents, it can be helpful to split the search into multiple slices to consume them independently with the `slice` and `pit` properties. By default the splitting is done first on the shards, then locally on each shard. The local splitting partitions the shard into contiguous ranges based on Lucene document IDs. +When paging through a large number of documents, it can be helpful to split the search into multiple slices to consume them independently with the `slice` and `pit` properties. +By default the splitting is done first on the shards, then locally on each shard. +The local splitting partitions the shard into contiguous ranges based on Lucene document IDs. For instance if the number of shards is equal to 2 and you request 4 slices, the slices 0 and 2 are assigned to the first shard and the slices 1 and 3 are assigned to the second shard. -::::{important} -The same point-in-time ID should be used for all slices. If different PIT IDs are used, slices can overlap and miss documents. This situation can occur because the splitting criterion is based on Lucene document IDs, which are not stable across changes to the index. -:::: +IMPORTANT: The same point-in-time ID should be used for all slices. +If different PIT IDs are used, slices can overlap and miss documents. +This situation can occur because the splitting criterion is based on Lucene document IDs, which are not stable across changes to the index. - -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-search) ```ts client.search({ ... }) ``` +### Arguments [_arguments_search] + +#### Request (object) [_request_search] + +- **`index` (Optional, string | string[])**: A list of data streams, indices, and aliases to search. It supports wildcards (`*`). To search all data streams and indices, omit this parameter or use `*` or `_all`. +- **`aggregations` (Optional, Record)**: Defines the aggregations that are run as part of the search request. +- **`collapse` (Optional, { field, inner_hits, max_concurrent_group_searches, collapse })**: Collapses search results the values of the specified field. +- **`explain` (Optional, boolean)**: If `true`, the request returns detailed information about score computation as part of a hit. 
+- **`ext` (Optional, Record)**: Configuration of search extensions defined by Elasticsearch plugins. +- **`from` (Optional, number)**: The starting document offset, which must be non-negative. By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters. To page through more hits, use the `search_after` parameter. +- **`highlight` (Optional, { encoder, fields })**: Specifies the highlighter to use for retrieving highlighted snippets from one or more fields in your search results. +- **`track_total_hits` (Optional, boolean | number)**: Number of hits matching the query to count accurately. If `true`, the exact number of hits is returned at the cost of some performance. If `false`, the response does not include the total number of hits matching the query. +- **`indices_boost` (Optional, Record[])**: Boost the `_score` of documents from specified indices. The boost value is the factor by which scores are multiplied. A boost value greater than `1.0` increases the score. A boost value between `0` and `1.0` decreases the score. +- **`docvalue_fields` (Optional, { field, format, include_unmapped }[])**: An array of wildcard (`*`) field patterns. The request returns doc values for field names matching these patterns in the `hits.fields` property of the response. +- **`knn` (Optional, { field, query_vector, query_vector_builder, k, num_candidates, boost, filter, similarity, inner_hits, rescore_vector } | { field, query_vector, query_vector_builder, k, num_candidates, boost, filter, similarity, inner_hits, rescore_vector }[])**: The approximate kNN search to run. +- **`rank` (Optional, { rrf })**: The Reciprocal Rank Fusion (RRF) to use. +- **`min_score` (Optional, number)**: The minimum `_score` for matching documents. Documents with a lower `_score` are not included in search results and results collected by aggregations. +- **`post_filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })**: Use the `post_filter` parameter to filter search results. The search hits are filtered after the aggregations are calculated. A post filter has no impact on the aggregation results. +- **`profile` (Optional, boolean)**: Set to `true` to return detailed timing information about the execution of individual components in a search request. NOTE: This is a debugging tool and adds significant overhead to search execution. 
+- **`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })**: The search definition using the Query DSL. +- **`rescore` (Optional, { window_size, query, learning_to_rank } | { window_size, query, learning_to_rank }[])**: Can be used to improve precision by reordering just the top (for example 100 - 500) documents returned by the `query` and `post_filter` phases. +- **`retriever` (Optional, { standard, knn, rrf, text_similarity_reranker, rule })**: A retriever is a specification to describe top documents returned from a search. A retriever replaces other elements of the search API that also return top documents such as `query` and `knn`. +- **`script_fields` (Optional, Record)**: Retrieve a script evaluation (based on different fields) for each hit. +- **`search_after` (Optional, number | number | string | boolean | null[])**: Used to retrieve the next page of hits using a set of sort values from the previous page. +- **`size` (Optional, number)**: The number of hits to return, which must not be negative. By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters. To page through more hits, use the `search_after` property. +- **`slice` (Optional, { field, id, max })**: Split a scrolled search into multiple slices that can be consumed independently. +- **`sort` (Optional, string | { _score, _doc, _geo_distance, _script } | string | { _score, _doc, _geo_distance, _script }[])**: A list of : pairs. +- **`_source` (Optional, boolean | { excludes, includes })**: The source fields that are returned for matching documents. These fields are returned in the `hits._source` property of the search response. If the `stored_fields` property is specified, the `_source` property defaults to `false`. Otherwise, it defaults to `true`. +- **`fields` (Optional, { field, format, include_unmapped }[])**: An array of wildcard (`*`) field patterns. The request returns values for field names matching these patterns in the `hits.fields` property of the response. +- **`suggest` (Optional, { text })**: Defines a suggester that provides similar looking terms based on a provided text. +- **`terminate_after` (Optional, number)**: The maximum number of documents to collect for each shard. If a query reaches this limit, Elasticsearch terminates the query early. Elasticsearch collects documents before sorting. IMPORTANT: Use with caution. Elasticsearch applies this property to each shard handling the request. When possible, let Elasticsearch perform early termination automatically. Avoid specifying this property for requests that target data streams with backing indices across multiple data tiers. If set to `0` (default), the query does not terminate early. +- **`timeout` (Optional, string)**: The period of time to wait for a response from each shard. 
If no response is received before the timeout expires, the request fails and returns an error. Defaults to no timeout. +- **`track_scores` (Optional, boolean)**: If `true`, calculate and return document scores, even if the scores are not used for sorting. +- **`version` (Optional, boolean)**: If `true`, the request returns the document version as part of a hit. +- **`seq_no_primary_term` (Optional, boolean)**: If `true`, the request returns sequence number and primary term of the last modification of each hit. +- **`stored_fields` (Optional, string | string[])**: A list of stored fields to return as part of a hit. If no fields are specified, no stored fields are included in the response. If this field is specified, the `_source` property defaults to `false`. You can pass `_source: true` to return both source fields and stored fields in the search response. +- **`pit` (Optional, { id, keep_alive })**: Limit the search to a point in time (PIT). If you provide a PIT, you cannot specify an `` in the request path. +- **`runtime_mappings` (Optional, Record)**: One or more runtime fields in the search request. These fields take precedence over mapped fields with the same name. +- **`stats` (Optional, string[])**: The stats groups to associate with the search. Each group maintains a statistics aggregation for its associated searches. You can retrieve these stats using the indices stats API. +- **`allow_no_indices` (Optional, boolean)**: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. +- **`allow_partial_search_results` (Optional, boolean)**: If `true` and there are shard request timeouts or shard failures, the request returns partial results. If `false`, it returns an error with no partial results. To override the default behavior, you can set the `search.default_allow_partial_results` cluster setting to `false`. +- **`analyzer` (Optional, string)**: The analyzer to use for the query string. This parameter can be used only when the `q` query string parameter is specified. +- **`analyze_wildcard` (Optional, boolean)**: If `true`, wildcard and prefix queries are analyzed. This parameter can be used only when the `q` query string parameter is specified. +- **`batched_reduce_size` (Optional, number)**: The number of shard results that should be reduced at once on the coordinating node. If the potential number of shards in the request can be large, this value should be used as a protection mechanism to reduce the memory overhead per search request. +- **`ccs_minimize_roundtrips` (Optional, boolean)**: If `true`, network round-trips between the coordinating node and the remote clusters are minimized when running cross-cluster search (CCS) requests. +- **`default_operator` (Optional, Enum("and" | "or"))**: The default operator for the query string query: `AND` or `OR`. This parameter can be used only when the `q` query string parameter is specified. +- **`df` (Optional, string)**: The field to use as a default when no field prefix is given in the query string. This parameter can be used only when the `q` query string parameter is specified. 
+- **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: The type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. It supports a list of values such as `open,hidden`. +- **`ignore_throttled` (Optional, boolean)**: If `true`, concrete, expanded or aliased indices will be ignored when frozen. +- **`ignore_unavailable` (Optional, boolean)**: If `false`, the request returns an error if it targets a missing or closed index. +- **`include_named_queries_score` (Optional, boolean)**: If `true`, the response includes the score contribution from any named queries. This functionality reruns each named query on every hit in a search response. Typically, this adds a small overhead to a request. However, using computationally expensive named queries on a large number of hits may add significant overhead. +- **`lenient` (Optional, boolean)**: If `true`, format-based query failures (such as providing text to a numeric field) in the query string will be ignored. This parameter can be used only when the `q` query string parameter is specified. +- **`max_concurrent_shard_requests` (Optional, number)**: The number of concurrent shard requests per node that the search runs concurrently. This value should be used to limit the impact of the search on the cluster in order to limit the number of concurrent shard requests. +- **`preference` (Optional, string)**: The nodes and shards used for the search. By default, Elasticsearch selects from eligible nodes and shards using adaptive replica selection, accounting for allocation awareness. Valid values are: * `_only_local` to run the search only on shards on the local node. * `_local` to, if possible, run the search on shards on the local node, or if not, select shards using the default method. * `_only_nodes:,` to run the search on only the specified nodes IDs. If suitable shards exist on more than one selected node, use shards on those nodes using the default method. If none of the specified nodes are available, select shards from any available node using the default method. * `_prefer_nodes:,` to if possible, run the search on the specified nodes IDs. If not, select shards using the default method. `_shards:,` to run the search only on the specified shards. You can combine this value with other `preference` values. However, the `_shards` value must come first. For example: `_shards:2,3|_local`. `` (any string that does not start with `_`) to route searches with the same `` to the same shards in the same order. +- **`pre_filter_shard_size` (Optional, number)**: A threshold that enforces a pre-filter roundtrip to prefilter search shards based on query rewriting if the number of shards the search request expands to exceeds the threshold. This filter roundtrip can limit the number of shards significantly if for instance a shard can not match any documents based on its rewrite method (if date filters are mandatory to match but the shard bounds and the query are disjoint). When unspecified, the pre-filter phase is executed if any of these conditions is met: * The request targets more than 128 shards. * The request targets one or more read-only index. * The primary sort of the query targets an indexed field. +- **`request_cache` (Optional, boolean)**: If `true`, the caching of search results is enabled for requests where `size` is `0`. 
It defaults to index level settings. +- **`routing` (Optional, string)**: A custom value that is used to route operations to a specific shard. +- **`scroll` (Optional, string | -1 | 0)**: The period to retain the search context for scrolling. By default, this value cannot exceed `1d` (24 hours). You can change this limit by using the `search.max_keep_alive` cluster-level setting. +- **`search_type` (Optional, Enum("query_then_fetch" | "dfs_query_then_fetch"))**: Indicates how distributed term frequencies are calculated for relevance scoring. +- **`suggest_field` (Optional, string)**: The field to use for suggestions. +- **`suggest_mode` (Optional, Enum("missing" | "popular" | "always"))**: The suggest mode. This parameter can be used only when the `suggest_field` and `suggest_text` query string parameters are specified. +- **`suggest_size` (Optional, number)**: The number of suggestions to return. This parameter can be used only when the `suggest_field` and `suggest_text` query string parameters are specified. +- **`suggest_text` (Optional, string)**: The source text for which the suggestions should be returned. This parameter can be used only when the `suggest_field` and `suggest_text` query string parameters are specified. +- **`typed_keys` (Optional, boolean)**: If `true`, aggregation and suggester names are prefixed by their respective types in the response. +- **`rest_total_hits_as_int` (Optional, boolean)**: Indicates whether `hits.total` should be rendered as an integer or an object in the rest search response. +- **`_source_excludes` (Optional, string | string[])**: A list of source fields to exclude from the response. You can also use this parameter to exclude fields from the subset specified in `_source_includes` query parameter. If the `_source` parameter is `false`, this parameter is ignored. +- **`_source_includes` (Optional, string | string[])**: A list of source fields to include in the response. If this parameter is specified, only these source fields are returned. You can exclude fields from this subset using the `_source_excludes` query parameter. If the `_source` parameter is `false`, this parameter is ignored. +- **`q` (Optional, string)**: A query in the Lucene query string syntax. Query parameter searches do not support the full Elasticsearch Query DSL but are handy for testing. IMPORTANT: This parameter overrides the query parameter in the request body. If both parameters are specified, documents matching the query request body parameter are not returned. +- **`force_synthetic_source` (Optional, boolean)**: Should this request force synthetic _source? Use this to test if the mapping supports synthetic _source and to get a sense of the worst case performance. Fetches with this enabled will be slower than enabling synthetic source natively in the index. + +## client.searchMvt [_search_mvt] +Search a vector tile. +Search a vector tile for geospatial values. +Before using this API, you should be familiar with the Mapbox vector tile specification. +The API returns results as a binary Mapbox vector tile. -### Arguments [_arguments_32] - -* **Request (object):** - - * **`index` (Optional, string | string[])**: A list of data streams, indices, and aliases to search. It supports wildcards (`*`). To search all data streams and indices, omit this parameter or use `*` or `_all`. - * **`aggregations` (Optional, Record)**: Defines the aggregations that are run as part of the search request.
- * **`collapse` (Optional, { field, inner_hits, max_concurrent_group_searches, collapse })**: Collapses search results the values of the specified field. - * **`explain` (Optional, boolean)**: If `true`, the request returns detailed information about score computation as part of a hit. - * **`ext` (Optional, Record)**: Configuration of search extensions defined by Elasticsearch plugins. - * **`from` (Optional, number)**: The starting document offset, which must be non-negative. By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters. To page through more hits, use the `search_after` parameter. - * **`highlight` (Optional, { encoder, fields })**: Specifies the highlighter to use for retrieving highlighted snippets from one or more fields in your search results. - * **`track_total_hits` (Optional, boolean | number)**: Number of hits matching the query to count accurately. If `true`, the exact number of hits is returned at the cost of some performance. If `false`, the response does not include the total number of hits matching the query. - * **`indices_boost` (Optional, Record[])**: Boost the `_score` of documents from specified indices. The boost value is the factor by which scores are multiplied. A boost value greater than `1.0` increases the score. A boost value between `0` and `1.0` decreases the score. - * **`docvalue_fields` (Optional, { field, format, include_unmapped }[])**: An array of wildcard (`*`) field patterns. The request returns doc values for field names matching these patterns in the `hits.fields` property of the response. - * **`knn` (Optional, { field, query_vector, query_vector_builder, k, num_candidates, boost, filter, similarity, inner_hits, rescore_vector } | { field, query_vector, query_vector_builder, k, num_candidates, boost, filter, similarity, inner_hits, rescore_vector }[])**: The approximate kNN search to run. - * **`rank` (Optional, { rrf })**: The Reciprocal Rank Fusion (RRF) to use. - * **`min_score` (Optional, number)**: The minimum `_score` for matching documents. Documents with a lower `_score` are not included in the search results. - * **`post_filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })**: Use the `post_filter` parameter to filter search results. The search hits are filtered after the aggregations are calculated. A post filter has no impact on the aggregation results. - * **`profile` (Optional, boolean)**: Set to `true` to return detailed timing information about the execution of individual components in a search request. NOTE: This is a debugging tool and adds significant overhead to search execution. 
- * **`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })**: The search definition using the Query DSL. - * **`rescore` (Optional, { window_size, query, learning_to_rank } | { window_size, query, learning_to_rank }[])**: Can be used to improve precision by reordering just the top (for example 100 - 500) documents returned by the `query` and `post_filter` phases. - * **`retriever` (Optional, { standard, knn, rrf, text_similarity_reranker, rule })**: A retriever is a specification to describe top documents returned from a search. A retriever replaces other elements of the search API that also return top documents such as `query` and `knn`. - * **`script_fields` (Optional, Record)**: Retrieve a script evaluation (based on different fields) for each hit. - * **`search_after` (Optional, number | number | string | boolean | null | User-defined value[])**: Used to retrieve the next page of hits using a set of sort values from the previous page. - * **`size` (Optional, number)**: The number of hits to return, which must not be negative. By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters. To page through more hits, use the `search_after` property. - * **`slice` (Optional, { field, id, max })**: Split a scrolled search into multiple slices that can be consumed independently. - * **`sort` (Optional, string | { _score, _doc, _geo_distance, _script } | string | { _score, _doc, _geo_distance, _script }[])**: A list of : pairs. - * **`_source` (Optional, boolean | { excludes, includes })**: The source fields that are returned for matching documents. These fields are returned in the `hits._source` property of the search response. If the `stored_fields` property is specified, the `_source` property defaults to `false`. Otherwise, it defaults to `true`. - * **`fields` (Optional, { field, format, include_unmapped }[])**: An array of wildcard (`*`) field patterns. The request returns values for field names matching these patterns in the `hits.fields` property of the response. - * **`suggest` (Optional, { text })**: Defines a suggester that provides similar looking terms based on a provided text. - * **`terminate_after` (Optional, number)**: The maximum number of documents to collect for each shard. If a query reaches this limit, Elasticsearch terminates the query early. Elasticsearch collects documents before sorting. IMPORTANT: Use with caution. Elasticsearch applies this property to each shard handling the request. When possible, let Elasticsearch perform early termination automatically. Avoid specifying this property for requests that target data streams with backing indices across multiple data tiers. If set to `0` (default), the query does not terminate early. - * **`timeout` (Optional, string)**: The period of time to wait for a response from each shard. 
If no response is received before the timeout expires, the request fails and returns an error. Defaults to no timeout. - * **`track_scores` (Optional, boolean)**: If `true`, calculate and return document scores, even if the scores are not used for sorting. - * **`version` (Optional, boolean)**: If `true`, the request returns the document version as part of a hit. - * **`seq_no_primary_term` (Optional, boolean)**: If `true`, the request returns sequence number and primary term of the last modification of each hit. - * **`stored_fields` (Optional, string | string[])**: A list of stored fields to return as part of a hit. If no fields are specified, no stored fields are included in the response. If this field is specified, the `_source` property defaults to `false`. You can pass `_source: true` to return both source fields and stored fields in the search response. - * **`pit` (Optional, { id, keep_alive })**: Limit the search to a point in time (PIT). If you provide a PIT, you cannot specify an `` in the request path. - * **`runtime_mappings` (Optional, Record)**: One or more runtime fields in the search request. These fields take precedence over mapped fields with the same name. - * **`stats` (Optional, string[])**: The stats groups to associate with the search. Each group maintains a statistics aggregation for its associated searches. You can retrieve these stats using the indices stats API. - * **`allow_no_indices` (Optional, boolean)**: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. - * **`allow_partial_search_results` (Optional, boolean)**: If `true` and there are shard request timeouts or shard failures, the request returns partial results. If `false`, it returns an error with no partial results. To override the default behavior, you can set the `search.default_allow_partial_results` cluster setting to `false`. - * **`analyzer` (Optional, string)**: The analyzer to use for the query string. This parameter can be used only when the `q` query string parameter is specified. - * **`analyze_wildcard` (Optional, boolean)**: If `true`, wildcard and prefix queries are analyzed. This parameter can be used only when the `q` query string parameter is specified. - * **`batched_reduce_size` (Optional, number)**: The number of shard results that should be reduced at once on the coordinating node. If the potential number of shards in the request can be large, this value should be used as a protection mechanism to reduce the memory overhead per search request. - * **`ccs_minimize_roundtrips` (Optional, boolean)**: If `true`, network round-trips between the coordinating node and the remote clusters are minimized when running cross-cluster search (CCS) requests. - * **`default_operator` (Optional, Enum("and" | "or"))**: The default operator for the query string query: `AND` or `OR`. This parameter can be used only when the `q` query string parameter is specified. - * **`df` (Optional, string)**: The field to use as a default when no field prefix is given in the query string. This parameter can be used only when the `q` query string parameter is specified. 
- * **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: The type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. It supports a list of values such as `open,hidden`. - * **`ignore_throttled` (Optional, boolean)**: If `true`, concrete, expanded or aliased indices will be ignored when frozen. - * **`ignore_unavailable` (Optional, boolean)**: If `false`, the request returns an error if it targets a missing or closed index. - * **`include_named_queries_score` (Optional, boolean)**: If `true`, the response includes the score contribution from any named queries. This functionality reruns each named query on every hit in a search response. Typically, this adds a small overhead to a request. However, using computationally expensive named queries on a large number of hits may add significant overhead. - * **`lenient` (Optional, boolean)**: If `true`, format-based query failures (such as providing text to a numeric field) in the query string will be ignored. This parameter can be used only when the `q` query string parameter is specified. - * **`max_concurrent_shard_requests` (Optional, number)**: The number of concurrent shard requests per node that the search runs concurrently. This value should be used to limit the impact of the search on the cluster in order to limit the number of concurrent shard requests. - * **`preference` (Optional, string)**: The nodes and shards used for the search. By default, Elasticsearch selects from eligible nodes and shards using adaptive replica selection, accounting for allocation awareness. Valid values are: * `_only_local` to run the search only on shards on the local node. * `_local` to, if possible, run the search on shards on the local node, or if not, select shards using the default method. * `_only_nodes:,` to run the search on only the specified nodes IDs. If suitable shards exist on more than one selected node, use shards on those nodes using the default method. If none of the specified nodes are available, select shards from any available node using the default method. * `_prefer_nodes:,` to if possible, run the search on the specified nodes IDs. If not, select shards using the default method. `_shards:,` to run the search only on the specified shards. You can combine this value with other `preference` values. However, the `_shards` value must come first. For example: `_shards:2,3|_local`. `` (any string that does not start with `_`) to route searches with the same `` to the same shards in the same order. - * **`pre_filter_shard_size` (Optional, number)**: A threshold that enforces a pre-filter roundtrip to prefilter search shards based on query rewriting if the number of shards the search request expands to exceeds the threshold. This filter roundtrip can limit the number of shards significantly if for instance a shard can not match any documents based on its rewrite method (if date filters are mandatory to match but the shard bounds and the query are disjoint). When unspecified, the pre-filter phase is executed if any of these conditions is met: * The request targets more than 128 shards. * The request targets one or more read-only index. * The primary sort of the query targets an indexed field. - * **`request_cache` (Optional, boolean)**: If `true`, the caching of search results is enabled for requests where `size` is `0`. 
It defaults to index level settings. - * **`routing` (Optional, string)**: A custom value that is used to route operations to a specific shard. - * **`scroll` (Optional, string | -1 | 0)**: The period to retain the search context for scrolling. By default, this value cannot exceed `1d` (24 hours). You can change this limit by using the `search.max_keep_alive` cluster-level setting. - * **`search_type` (Optional, Enum("query_then_fetch" | "dfs_query_then_fetch"))**: Indicates how distributed term frequencies are calculated for relevance scoring. - * **`suggest_field` (Optional, string)**: The field to use for suggestions. - * **`suggest_mode` (Optional, Enum("missing" | "popular" | "always"))**: The suggest mode. This parameter can be used only when the `suggest_field` and `suggest_text` query string parameters are specified. - * **`suggest_size` (Optional, number)**: The number of suggestions to return. This parameter can be used only when the `suggest_field` and `suggest_text` query string parameters are specified. - * **`suggest_text` (Optional, string)**: The source text for which the suggestions should be returned. This parameter can be used only when the `suggest_field` and `suggest_text` query string parameters are specified. - * **`typed_keys` (Optional, boolean)**: If `true`, aggregation and suggester names are be prefixed by their respective types in the response. - * **`rest_total_hits_as_int` (Optional, boolean)**: Indicates whether `hits.total` should be rendered as an integer or an object in the rest search response. - * **`_source_excludes` (Optional, string | string[])**: A list of source fields to exclude from the response. You can also use this parameter to exclude fields from the subset specified in `_source_includes` query parameter. If the `_source` parameter is `false`, this parameter is ignored. - * **`_source_includes` (Optional, string | string[])**: A list of source fields to include in the response. If this parameter is specified, only these source fields are returned. You can exclude fields from this subset using the `_source_excludes` query parameter. If the `_source` parameter is `false`, this parameter is ignored. - * **`q` (Optional, string)**: A query in the Lucene query string syntax. Query parameter searches do not support the full Elasticsearch Query DSL but are handy for testing. IMPORTANT: This parameter overrides the query parameter in the request body. If both parameters are specified, documents matching the query request body parameter are not returned. - * **`force_synthetic_source` (Optional, boolean)**: Should this request force synthetic _source? Use this to test if the mapping supports synthetic _source and to get a sense of the worst case performance. Fetches with this enabled will be slower the enabling synthetic source natively in the index. - - - -## search_mvt [_search_mvt] +Internally, Elasticsearch translates a vector tile search API request into a search containing: -Search a vector tile. +* A `geo_bounding_box` query on the ``. The query uses the `//` tile as a bounding box. +* A `geotile_grid` or `geohex_grid` aggregation on the ``. The `grid_agg` parameter determines the aggregation type. The aggregation uses the `//` tile as a bounding box. +* Optionally, a `geo_bounds` aggregation on the ``. The search only includes this aggregation if the `exact_bounds` parameter is `true`. 
+* If the optional parameter `with_labels` is `true`, the internal search will include a dynamic runtime field that calls the `getLabelPosition` function of the geometry doc value. This enables the generation of new point features containing suggested geometry labels, so that, for example, multi-polygons will have only one label. -Search a vector tile for geospatial values. +For example, Elasticsearch may translate a vector tile search API request with a `grid_agg` argument of `geotile` and an `exact_bounds` argument of `true` into the following search + +``` +GET my-index/_search +{ + "size": 10000, + "query": { + "geo_bounding_box": { + "my-geo-field": { + "top_left": { + "lat": -40.979898069620134, + "lon": -45 + }, + "bottom_right": { + "lat": -66.51326044311186, + "lon": 0 + } + } + } + }, + "aggregations": { + "grid": { + "geotile_grid": { + "field": "my-geo-field", + "precision": 11, + "size": 65536, + "bounds": { + "top_left": { + "lat": -40.979898069620134, + "lon": -45 + }, + "bottom_right": { + "lat": -66.51326044311186, + "lon": 0 + } + } + } + }, + "bounds": { + "geo_bounds": { + "field": "my-geo-field", + "wrap_longitude": false + } + } + } +} +``` -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-mvt) +The API returns results as a binary Mapbox vector tile. +Mapbox vector tiles are encoded as Google Protobufs (PBF). By default, the tile contains three layers: + +* A `hits` layer containing a feature for each `` value matching the `geo_bounding_box` query. +* An `aggs` layer containing a feature for each cell of the `geotile_grid` or `geohex_grid`. The layer only contains features for cells with matching data. +* A meta layer containing: + * A feature containing a bounding box. By default, this is the bounding box of the tile. + * Value ranges for any sub-aggregations on the `geotile_grid` or `geohex_grid`. + * Metadata for the search. + +The API only returns features that can display at its zoom level. +For example, if a polygon feature has no area at its zoom level, the API omits it. +The API returns errors as UTF-8 encoded JSON. + +IMPORTANT: You can specify several options for this API as either a query parameter or request body parameter. +If you specify both parameters, the query parameter takes precedence. + +**Grid precision for geotile** + +For a `grid_agg` of `geotile`, you can use cells in the `aggs` layer as tiles for lower zoom levels. +`grid_precision` represents the additional zoom levels available through these cells. The final precision is computed by as follows: ` + grid_precision`. +For example, if `` is 7 and `grid_precision` is 8, then the `geotile_grid` aggregation will use a precision of 15. +The maximum final precision is 29. +The `grid_precision` also determines the number of cells for the grid as follows: `(2^grid_precision) x (2^grid_precision)`. +For example, a value of 8 divides the tile into a grid of 256 x 256 cells. +The `aggs` layer only contains features for cells with matching data. + +**Grid precision for geohex** + +For a `grid_agg` of `geohex`, Elasticsearch uses `` and `grid_precision` to calculate a final precision as follows: ` + grid_precision`. + +This precision determines the H3 resolution of the hexagonal cells produced by the `geohex` aggregation. +The following table maps the H3 resolution for each precision. +For example, if `` is 3 and `grid_precision` is 3, the precision is 6. +At a precision of 6, hexagonal cells have an H3 resolution of 2. 
+If `` is 3 and `grid_precision` is 4, the precision is 7. +At a precision of 7, hexagonal cells have an H3 resolution of 3. + +| Precision | Unique tile bins | H3 resolution | Unique hex bins | Ratio | +| --------- | ---------------- | ------------- | ----------------| ----- | +| 1 | 4 | 0 | 122 | 30.5 | +| 2 | 16 | 0 | 122 | 7.625 | +| 3 | 64 | 1 | 842 | 13.15625 | +| 4 | 256 | 1 | 842 | 3.2890625 | +| 5 | 1024 | 2 | 5882 | 5.744140625 | +| 6 | 4096 | 2 | 5882 | 1.436035156 | +| 7 | 16384 | 3 | 41162 | 2.512329102 | +| 8 | 65536 | 3 | 41162 | 0.6280822754 | +| 9 | 262144 | 4 | 288122 | 1.099098206 | +| 10 | 1048576 | 4 | 288122 | 0.2747745514 | +| 11 | 4194304 | 5 | 2016842 | 0.4808526039 | +| 12 | 16777216 | 6 | 14117882 | 0.8414913416 | +| 13 | 67108864 | 6 | 14117882 | 0.2103728354 | +| 14 | 268435456 | 7 | 98825162 | 0.3681524172 | +| 15 | 1073741824 | 8 | 691776122 | 0.644266719 | +| 16 | 4294967296 | 8 | 691776122 | 0.1610666797 | +| 17 | 17179869184 | 9 | 4842432842 | 0.2818666889 | +| 18 | 68719476736 | 10 | 33897029882 | 0.4932667053 | +| 19 | 274877906944 | 11 | 237279209162 | 0.8632167343 | +| 20 | 1099511627776 | 11 | 237279209162 | 0.2158041836 | +| 21 | 4398046511104 | 12 | 1660954464122 | 0.3776573213 | +| 22 | 17592186044416 | 13 | 11626681248842 | 0.6609003122 | +| 23 | 70368744177664 | 13 | 11626681248842 | 0.165225078 | +| 24 | 281474976710656 | 14 | 81386768741882 | 0.2891438866 | +| 25 | 1125899906842620 | 15 | 569707381193162 | 0.5060018015 | +| 26 | 4503599627370500 | 15 | 569707381193162 | 0.1265004504 | +| 27 | 18014398509482000 | 15 | 569707381193162 | 0.03162511259 | +| 28 | 72057594037927900 | 15 | 569707381193162 | 0.007906278149 | +| 29 | 288230376151712000 | 15 | 569707381193162 | 0.001976569537 | + +Hexagonal cells don't align perfectly on a vector tile. +Some cells may intersect more than one vector tile. +To compute the H3 resolution for each precision, Elasticsearch compares the average density of hexagonal bins at each resolution with the average density of tile bins at each zoom level. +Elasticsearch uses the H3 resolution that is closest to the corresponding geotile density. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-search-mvt) ```ts client.searchMvt({ index, field, zoom, x, y }) ``` +### Arguments [_arguments_search_mvt] + +#### Request (object) [_request_search_mvt] + +- **`index` (string | string[])**: List of data streams, indices, or aliases to search +- **`field` (string)**: Field containing geospatial data to return +- **`zoom` (number)**: Zoom level for the vector tile to search +- **`x` (number)**: X coordinate for the vector tile to search +- **`y` (number)**: Y coordinate for the vector tile to search +- **`aggs` (Optional, Record)**: Sub-aggregations for the geotile_grid. It supports the following aggregation types: - `avg` - `boxplot` - `cardinality` - `extended stats` - `max` - `median absolute deviation` - `min` - `percentile` - `percentile-rank` - `stats` - `sum` - `value count` The aggregation names can't start with `_mvt_`. The `_mvt_` prefix is reserved for internal aggregations. +- **`buffer` (Optional, number)**: The size, in pixels, of a clipping buffer outside the tile. This allows renderers to avoid outline artifacts from geometries that extend past the extent of the tile. +- **`exact_bounds` (Optional, boolean)**: If `false`, the meta layer's feature is the bounding box of the tile. 
If `true`, the meta layer's feature is a bounding box resulting from a `geo_bounds` aggregation. The aggregation runs on values that intersect the `//` tile with `wrap_longitude` set to `false`. The resulting bounding box may be larger than the vector tile. +- **`extent` (Optional, number)**: The size, in pixels, of a side of the tile. Vector tiles are square with equal sides. +- **`fields` (Optional, string | string[])**: The fields to return in the `hits` layer. It supports wildcards (`*`). This parameter does not support fields with array values. Fields with array values may return inconsistent results. +- **`grid_agg` (Optional, Enum("geotile" | "geohex"))**: The aggregation used to create a grid for the `field`. +- **`grid_precision` (Optional, number)**: Additional zoom levels available through the aggs layer. For example, if `` is `7` and `grid_precision` is `8`, you can zoom in up to level 15. Accepts 0-8. If 0, results don't include the aggs layer. +- **`grid_type` (Optional, Enum("grid" | "point" | "centroid"))**: Determines the geometry type for features in the aggs layer. In the aggs layer, each feature represents a `geotile_grid` cell. If `grid`, each feature is a polygon of the cell's bounding box. If `point`, each feature is a Point that is the centroid of the cell. +- **`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })**: The query DSL used to filter documents for the search. +- **`runtime_mappings` (Optional, Record)**: Defines one or more runtime fields in the search request. These fields take precedence over mapped fields with the same name. +- **`size` (Optional, number)**: The maximum number of features to return in the hits layer. Accepts 0-10000. If 0, results don't include the hits layer. +- **`sort` (Optional, string | { _score, _doc, _geo_distance, _script } | string | { _score, _doc, _geo_distance, _script }[])**: Sort the features in the hits layer. By default, the API calculates a bounding box for each feature. It sorts features based on this box's diagonal length, from longest to shortest. +- **`track_total_hits` (Optional, boolean | number)**: The number of hits matching the query to count accurately. If `true`, the exact number of hits is returned at the cost of some performance. If `false`, the response does not include the total number of hits matching the query. +- **`with_labels` (Optional, boolean)**: If `true`, the hits and aggs layers will contain additional point features representing suggested label positions for the original features. * `Point` and `MultiPoint` features will have one of the points selected. * `Polygon` and `MultiPolygon` features will have a single point generated, either the centroid, if it is within the polygon, or another point within the polygon selected from the sorted triangle-tree.
* `LineString` features will likewise provide a roughly central point selected from the triangle-tree. * The aggregation results will provide one central point for each aggregation bucket. All attributes from the original features will also be copied to the new label features. In addition, the new features will be distinguishable using the tag `_mvt_label_position`. + +## client.searchShards [_search_shards] +Get the search shards. +Get the indices and shards that a search request would be run against. +This information can be useful for working out issues or planning optimizations with routing and shard preferences. +When filtered aliases are used, the filter is returned as part of the `indices` section. -### Arguments [_arguments_33] - -* **Request (object):** - - * **`index` (string | string[])**: List of data streams, indices, or aliases to search - * **`field` (string)**: Field containing geospatial data to return - * **`zoom` (number)**: Zoom level for the vector tile to search - * **`x` (number)**: X coordinate for the vector tile to search - * **`y` (number)**: Y coordinate for the vector tile to search - * **`aggs` (Optional, Record)**: Sub-aggregations for the geotile_grid. Supports the following aggregation types: - avg - cardinality - max - min - sum - * **`buffer` (Optional, number)**: Size, in pixels, of a clipping buffer outside the tile. This allows renderers to avoid outline artifacts from geometries that extend past the extent of the tile. - * **`exact_bounds` (Optional, boolean)**: If false, the meta layer’s feature is the bounding box of the tile. If true, the meta layer’s feature is a bounding box resulting from a geo_bounds aggregation. The aggregation runs on values that intersect the // tile with wrap_longitude set to false. The resulting bounding box may be larger than the vector tile. - * **`extent` (Optional, number)**: Size, in pixels, of a side of the tile. Vector tiles are square with equal sides. - * **`fields` (Optional, string | string[])**: Fields to return in the `hits` layer. Supports wildcards (`*`). This parameter does not support fields with array values. Fields with array values may return inconsistent results. - * **`grid_agg` (Optional, Enum("geotile" | "geohex"))**: Aggregation used to create a grid for the `field`. - * **`grid_precision` (Optional, number)**: Additional zoom levels available through the aggs layer. For example, if is 7 and grid_precision is 8, you can zoom in up to level 15. Accepts 0-8. If 0, results don’t include the aggs layer. - * **`grid_type` (Optional, Enum("grid" | "point" | "centroid"))**: Determines the geometry type for features in the aggs layer. In the aggs layer, each feature represents a geotile_grid cell. If *grid* each feature is a Polygon of the cells bounding box. If *point* each feature is a Point that is the centroid of the cell. 
- * **`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })**: Query DSL used to filter documents for the search. - * **`runtime_mappings` (Optional, Record)**: Defines one or more runtime fields in the search request. These fields take precedence over mapped fields with the same name. - * **`size` (Optional, number)**: Maximum number of features to return in the hits layer. Accepts 0-10000. If 0, results don’t include the hits layer. - * **`sort` (Optional, string | { _score, _doc, _geo_distance, _script } | string | { _score, _doc, _geo_distance, _script }[])**: Sorts features in the hits layer. By default, the API calculates a bounding box for each feature. It sorts features based on this box’s diagonal length, from longest to shortest. - * **`track_total_hits` (Optional, boolean | number)**: Number of hits matching the query to count accurately. If `true`, the exact number of hits is returned at the cost of some performance. If `false`, the response does not include the total number of hits matching the query. - * **`with_labels` (Optional, boolean)**: If `true`, the hits and aggs layers will contain additional point features representing suggested label positions for the original features. +If the Elasticsearch security features are enabled, you must have the `view_index_metadata` or `manage` index privilege for the target data stream, index, or alias. +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-search-shards) +```ts +client.searchShards({ ... }) +``` +### Arguments [_arguments_search_shards] -## search_shards [_search_shards] +#### Request (object) [_request_search_shards] -Get the search shards. +- **`index` (Optional, string | string[])**: A list of data streams, indices, and aliases to search. It supports wildcards (`*`). To search all data streams and indices, omit this parameter or use `*` or `_all`. +- **`allow_no_indices` (Optional, boolean)**: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. +- **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports a list of values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. +- **`ignore_unavailable` (Optional, boolean)**: If `false`, the request returns an error if it targets a missing or closed index. 
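+As a quick sketch (placeholder `my-index` index and routing value, existing `client` instance), a typical call and the shard information it returns look like this:
+
+```ts
+// Illustrative sketch only: report which nodes and shards would serve a search
+// against the (hypothetical) `my-index` index for a given routing value.
+const shardInfo = await client.searchShards({
+  index: 'my-index',
+  routing: 'user-123' // placeholder routing value
+})
+console.log(shardInfo.shards)
+```
+
+The remaining request parameters are described below.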
+- **`local` (Optional, boolean)**: If `true`, the request retrieves information from the local node only. +- **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for a connection to the master node. If the master node is not available before the timeout expires, the request fails and returns an error. It can also be set to `-1` to indicate that the request should never time out. +- **`preference` (Optional, string)**: The node or shard the operation should be performed on. It is random by default. +- **`routing` (Optional, string)**: A custom value used to route operations to a specific shard. -Get the indices and shards that a search request would be run against. This information can be useful for working out issues or planning optimizations with routing and shard preferences. When filtered aliases are used, the filter is returned as part of the indices section. +## client.searchTemplate [_search_template] +Run a search with a search template. -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-shards) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-search-template) ```ts -client.searchShards({ ... }) +client.searchTemplate({ ... }) ``` +### Arguments [_arguments_search_template] + +#### Request (object) [_request_search_template] + +- **`index` (Optional, string | string[])**: A list of data streams, indices, and aliases to search. It supports wildcards (`*`). +- **`explain` (Optional, boolean)**: If `true`, returns detailed information about score calculation as part of each hit. If you specify both this and the `explain` query parameter, the API uses only the query parameter. +- **`id` (Optional, string)**: The ID of the search template to use. If no `source` is specified, this parameter is required. +- **`params` (Optional, Record)**: Key-value pairs used to replace Mustache variables in the template. The key is the variable name. The value is the variable value. +- **`profile` (Optional, boolean)**: If `true`, the query execution is profiled. +- **`source` (Optional, string | { aggregations, collapse, explain, ext, from, highlight, track_total_hits, indices_boost, docvalue_fields, knn, rank, min_score, post_filter, profile, query, rescore, retriever, script_fields, search_after, size, slice, sort, _source, fields, suggest, terminate_after, timeout, track_scores, version, seq_no_primary_term, stored_fields, pit, runtime_mappings, stats })**: An inline search template. Supports the same parameters as the search API's request body. It also supports Mustache variables. If no `id` is specified, this parameter is required. +- **`allow_no_indices` (Optional, boolean)**: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. +- **`ccs_minimize_roundtrips` (Optional, boolean)**: If `true`, network round-trips are minimized for cross-cluster search requests. +- **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: The type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams.
Supports a list of values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. +- **`ignore_throttled` (Optional, boolean)**: If `true`, specified concrete, expanded, or aliased indices are not included in the response when throttled. +- **`ignore_unavailable` (Optional, boolean)**: If `false`, the request returns an error if it targets a missing or closed index. +- **`preference` (Optional, string)**: The node or shard the operation should be performed on. It is random by default. +- **`routing` (Optional, string)**: A custom value used to route operations to a specific shard. +- **`scroll` (Optional, string | -1 | 0)**: Specifies how long a consistent view of the index should be maintained for scrolled search. +- **`search_type` (Optional, Enum("query_then_fetch" | "dfs_query_then_fetch"))**: The type of the search operation. +- **`rest_total_hits_as_int` (Optional, boolean)**: If `true`, `hits.total` is rendered as an integer in the response. If `false`, it is rendered as an object. +- **`typed_keys` (Optional, boolean)**: If `true`, the response prefixes aggregation and suggester names with their respective types. + +## client.termsEnum [_terms_enum] +Get terms in an index. +Discover terms that match a partial string in an index. +This API is designed for low-latency look-ups used in auto-complete scenarios. -### Arguments [_arguments_34] - -* **Request (object):** - - * **`index` (Optional, string | string[])**: Returns the indices and shards that a search request would be executed against. - * **`allow_no_indices` (Optional, boolean)**: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. - * **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports a list of values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. - * **`ignore_unavailable` (Optional, boolean)**: If `false`, the request returns an error if it targets a missing or closed index. - * **`local` (Optional, boolean)**: If `true`, the request retrieves information from the local node only. - * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. - * **`preference` (Optional, string)**: Specifies the node or shard the operation should be performed on. Random by default. - * **`routing` (Optional, string)**: Custom value used to route operations to a specific shard. - - - -## search_template [_search_template] - -Run a search with a search template. +> info +> The terms enum API may return terms from deleted documents. Deleted documents are initially only marked as deleted. It is not until their segments are merged that documents are actually deleted. Until that happens, the terms enum API will return terms from these documents. -[Endpoint documentation](docs-content://solutions/search/search-templates.md) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-terms-enum) ```ts -client.searchTemplate({ ... 
}) +client.termsEnum({ index, field }) ``` +### Arguments [_arguments_terms_enum] +#### Request (object) [_request_terms_enum] -### Arguments [_arguments_35] - -* **Request (object):** - - * **`index` (Optional, string | string[])**: List of data streams, indices, and aliases to search. Supports wildcards (*). - * **`explain` (Optional, boolean)**: If `true`, returns detailed information about score calculation as part of each hit. - * **`id` (Optional, string)**: ID of the search template to use. If no source is specified, this parameter is required. - * **`params` (Optional, Record)**: Key-value pairs used to replace Mustache variables in the template. The key is the variable name. The value is the variable value. - * **`profile` (Optional, boolean)**: If `true`, the query execution is profiled. - * **`source` (Optional, string)**: An inline search template. Supports the same parameters as the search API’s request body. Also supports Mustache variables. If no id is specified, this parameter is required. - * **`allow_no_indices` (Optional, boolean)**: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. - * **`ccs_minimize_roundtrips` (Optional, boolean)**: If `true`, network round-trips are minimized for cross-cluster search requests. - * **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports a list of values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. - * **`ignore_throttled` (Optional, boolean)**: If `true`, specified concrete, expanded, or aliased indices are not included in the response when throttled. - * **`ignore_unavailable` (Optional, boolean)**: If `false`, the request returns an error if it targets a missing or closed index. - * **`preference` (Optional, string)**: Specifies the node or shard the operation should be performed on. Random by default. - * **`routing` (Optional, string)**: Custom value used to route operations to a specific shard. - * **`scroll` (Optional, string | -1 | 0)**: Specifies how long a consistent view of the index should be maintained for scrolled search. - * **`search_type` (Optional, Enum("query_then_fetch" | "dfs_query_then_fetch"))**: The type of the search operation. - * **`rest_total_hits_as_int` (Optional, boolean)**: If true, hits.total are rendered as an integer in the response. - * **`typed_keys` (Optional, boolean)**: If `true`, the response prefixes aggregation and suggester names with their respective types. - - +- **`index` (string)**: A list of data streams, indices, and index aliases to search. Wildcard (`*`) expressions are supported. To search all data streams or indices, omit this parameter or use `*` or `_all`. +- **`field` (string)**: The string to match at the start of indexed terms. If not provided, all terms in the field are considered. +- **`size` (Optional, number)**: The number of matching terms to return. +- **`timeout` (Optional, string | -1 | 0)**: The maximum length of time to spend collecting results. 
If the timeout is exceeded, the `complete` flag is set to `false` in the response and the results may be partial or empty. +- **`case_insensitive` (Optional, boolean)**: When `true`, the provided search string is matched against index terms without case sensitivity. +- **`index_filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })**: Filter an index shard if the provided query rewrites to `match_none`. +- **`string` (Optional, string)**: The string to match at the start of indexed terms. If it is not provided, all terms in the field are considered. > info > The prefix string cannot be larger than the largest possible keyword value, which is Lucene's term byte-length limit of 32766. +- **`search_after` (Optional, string)**: The string after which terms in the index should be returned. It allows for a form of pagination if the last result from one request is passed as the `search_after` parameter for a subsequent request. -## terms_enum [_terms_enum] - -Get terms in an index. - -Discover terms that match a partial string in an index. This "terms enum" API is designed for low-latency look-ups used in auto-complete scenarios. - -If the `complete` property in the response is false, the returned terms set may be incomplete and should be treated as approximate. This can occur due to a few reasons, such as a request timeout or a node error. - -::::{note} -The terms enum API may return terms from deleted documents. Deleted documents are initially only marked as deleted. It is not until their segments are merged that documents are actually deleted. Until that happens, the terms enum API will return terms from these documents. -:::: +## client.termvectors [_termvectors] +Get term vector information. +Get information and statistics about terms in the fields of a particular document. -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-terms-enum) +You can retrieve term vectors for documents stored in the index or for artificial documents passed in the body of the request. +You can specify the fields you are interested in through the `fields` parameter or by adding the fields to the request body. +For example: -```ts -client.termsEnum({ index, field }) +``` +GET /my-index-000001/_termvectors/1?fields=message ``` +Fields can be specified using wildcards, similar to the multi match query. -### Arguments [_arguments_36] +Term vectors are real-time by default, not near real-time. +This can be changed by setting the `realtime` parameter to `false`. -* **Request (object):** +You can request three types of values: _term information_, _term statistics_, and _field statistics_. +By default, all term information and field statistics are returned for all fields but term statistics are excluded. - * **`index` (string)**: List of data streams, indices, and index aliases to search. Wildcard (*) expressions are supported.
- * **`field` (string)**: The string to match at the start of indexed terms. If not provided, all terms in the field are considered. - * **`size` (Optional, number)**: How many matching terms to return. - * **`timeout` (Optional, string | -1 | 0)**: The maximum length of time to spend collecting results. Defaults to "1s" (one second). If the timeout is exceeded the complete flag set to false in the response and the results may be partial or empty. - * **`case_insensitive` (Optional, boolean)**: When true the provided search string is matched against index terms without case sensitivity. - * **`index_filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })**: Allows to filter an index shard if the provided query rewrites to match_none. - * **`string` (Optional, string)**: The string after which terms in the index should be returned. Allows for a form of pagination if the last result from one request is passed as the search_after parameter for a subsequent request. - * **`search_after` (Optional, string)** +**Term information** +* term frequency in the field (always returned) +* term positions (`positions: true`) +* start and end offsets (`offsets: true`) +* term payloads (`payloads: true`), as base64 encoded bytes +If the requested information wasn't stored in the index, it will be computed on the fly if possible. +Additionally, term vectors could be computed for documents not even existing in the index, but instead provided by the user. -## termvectors [_termvectors] +> warn +> Start and end offsets assume UTF-16 encoding is being used. If you want to use these offsets in order to get the original text that produced this token, you should make sure that the string you are taking a sub-string of is also encoded using UTF-16. -Get term vector information. +**Behaviour** -Get information and statistics about terms in the fields of a particular document. +The term and field statistics are not accurate. +Deleted documents are not taken into account. +The information is only retrieved for the shard the requested document resides in. +The term and field statistics are therefore only useful as relative measures whereas the absolute numbers have no meaning in this context. +By default, when requesting term vectors of artificial documents, a shard to get the statistics from is randomly selected. +Use `routing` only to hit a particular shard. -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-termvectors) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-termvectors) ```ts client.termvectors({ index }) ``` - - -### Arguments [_arguments_37] - -* **Request (object):** - - * **`index` (string)**: Name of the index that contains the document. - * **`id` (Optional, string)**: Unique identifier of the document. 
- * **`doc` (Optional, object)**: An artificial document (a document not present in the index) for which you want to retrieve term vectors. - * **`filter` (Optional, { max_doc_freq, max_num_terms, max_term_freq, max_word_length, min_doc_freq, min_term_freq, min_word_length })**: Filter terms based on their tf-idf scores. - * **`per_field_analyzer` (Optional, Record)**: Overrides the default per-field analyzer. - * **`fields` (Optional, string | string[])**: List or wildcard expressions of fields to include in the statistics. Used as the default list unless a specific field list is provided in the `completion_fields` or `fielddata_fields` parameters. - * **`field_statistics` (Optional, boolean)**: If `true`, the response includes the document count, sum of document frequencies, and sum of total term frequencies. - * **`offsets` (Optional, boolean)**: If `true`, the response includes term offsets. - * **`payloads` (Optional, boolean)**: If `true`, the response includes term payloads. - * **`positions` (Optional, boolean)**: If `true`, the response includes term positions. - * **`preference` (Optional, string)**: Specifies the node or shard the operation should be performed on. Random by default. - * **`realtime` (Optional, boolean)**: If true, the request is real-time as opposed to near-real-time. - * **`routing` (Optional, string)**: Custom value used to route operations to a specific shard. - * **`term_statistics` (Optional, boolean)**: If `true`, the response includes term frequency and document frequency. - * **`version` (Optional, number)**: If `true`, returns the document version as part of a hit. - * **`version_type` (Optional, Enum("internal" | "external" | "external_gte" | "force"))**: Specific version type. - - - -## update [_update] - +### Arguments [_arguments_termvectors] + +#### Request (object) [_request_termvectors] + +- **`index` (string)**: The name of the index that contains the document. +- **`id` (Optional, string)**: A unique identifier for the document. +- **`doc` (Optional, object)**: An artificial document (a document not present in the index) for which you want to retrieve term vectors. +- **`filter` (Optional, { max_doc_freq, max_num_terms, max_term_freq, max_word_length, min_doc_freq, min_term_freq, min_word_length })**: Filter terms based on their tf-idf scores. This could be useful in order to find out a good characteristic vector of a document. This feature works in a similar manner to the second phase of the More Like This Query. +- **`per_field_analyzer` (Optional, Record)**: Override the default per-field analyzer. This is useful in order to generate term vectors in any fashion, especially when using artificial documents. When providing an analyzer for a field that already stores term vectors, the term vectors will be regenerated. +- **`fields` (Optional, string | string[])**: A list of fields to include in the statistics. It is used as the default list unless a specific field list is provided in the `completion_fields` or `fielddata_fields` parameters. +- **`field_statistics` (Optional, boolean)**: If `true`, the response includes: * The document count (how many documents contain this field). * The sum of document frequencies (the sum of document frequencies for all terms in this field). * The sum of total term frequencies (the sum of total term frequencies of each term in this field). +- **`offsets` (Optional, boolean)**: If `true`, the response includes term offsets. +- **`payloads` (Optional, boolean)**: If `true`, the response includes term payloads.
+- **`positions` (Optional, boolean)**: If `true`, the response includes term positions. +- **`term_statistics` (Optional, boolean)**: If `true`, the response includes: * The total term frequency (how often a term occurs in all documents). * The document frequency (the number of documents containing the current term). By default these values are not returned since term statistics can have a serious performance impact. +- **`routing` (Optional, string)**: A custom value that is used to route operations to a specific shard. +- **`version` (Optional, number)**: If `true`, returns the document version as part of a hit. +- **`version_type` (Optional, Enum("internal" | "external" | "external_gte" | "force"))**: The version type. +- **`preference` (Optional, string)**: The node or shard the operation should be performed on. It is random by default. +- **`realtime` (Optional, boolean)**: If true, the request is real-time as opposed to near-real-time. + +## client.update [_update] Update a document. Update a document by running a script or passing a partial document. If the Elasticsearch security features are enabled, you must have the `index` or `write` index privilege for the target index or index alias. -The script can update, delete, or skip modifying the document. The API also supports passing a partial document, which is merged into the existing document. To fully replace an existing document, use the index API. This operation: +The script can update, delete, or skip modifying the document. +The API also supports passing a partial document, which is merged into the existing document. +To fully replace an existing document, use the index API. +This operation: * Gets the document (collocated with the shard) from the index. * Runs the specified script. @@ -1873,45 +2037,44 @@ The script can update, delete, or skip modifying the document. The API also supp The document must still be reindexed, but using this API removes some network roundtrips and reduces chances of version conflicts between the GET and the index operation. -The `_source` field must be enabled to use this API. In addition to `_source`, you can access the following variables through the `ctx` map: `_index`, `_type`, `_id`, `_version`, `_routing`, and `_now` (the current timestamp). +The `_source` field must be enabled to use this API. +In addition to `_source`, you can access the following variables through the `ctx` map: `_index`, `_type`, `_id`, `_version`, `_routing`, and `_now` (the current timestamp). -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-update) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-update) ```ts client.update({ id, index }) ``` - - -### Arguments [_arguments_38] - -* **Request (object):** - - * **`id` (string)**: A unique identifier for the document to be updated. - * **`index` (string)**: The name of the target index. By default, the index is created automatically if it doesn’t exist. - * **`detect_noop` (Optional, boolean)**: If `true`, the `result` in the response is set to `noop` (no operation) when there are no changes to the document. - * **`doc` (Optional, object)**: A partial update to an existing document. If both `doc` and `script` are specified, `doc` is ignored. - * **`doc_as_upsert` (Optional, boolean)**: If `true`, use the contents of *doc* as the value of *upsert*. NOTE: Using ingest pipelines with `doc_as_upsert` is not supported. 
- * **`script` (Optional, { source, id, params, lang, options })**: The script to run to update the document. - * **`scripted_upsert` (Optional, boolean)**: If `true`, run the script whether or not the document exists. - * **`_source` (Optional, boolean | { excludes, includes })**: If `false`, turn off source retrieval. You can also specify a list of the fields you want to retrieve. - * **`upsert` (Optional, object)**: If the document does not already exist, the contents of *upsert* are inserted as a new document. If the document exists, the *script* is run. - * **`if_primary_term` (Optional, number)**: Only perform the operation if the document has this primary term. - * **`if_seq_no` (Optional, number)**: Only perform the operation if the document has this sequence number. - * **`lang` (Optional, string)**: The script language. - * **`refresh` (Optional, Enum(true | false | "wait_for"))**: If *true*, Elasticsearch refreshes the affected shards to make this operation visible to search. If *wait_for*, it waits for a refresh to make this operation visible to search. If *false*, it does nothing with refreshes. - * **`require_alias` (Optional, boolean)**: If `true`, the destination must be an index alias. - * **`retry_on_conflict` (Optional, number)**: The number of times the operation should be retried when a conflict occurs. - * **`routing` (Optional, string)**: A custom value used to route operations to a specific shard. - * **`timeout` (Optional, string | -1 | 0)**: The period to wait for the following operations: dynamic mapping updates and waiting for active shards. Elasticsearch waits for at least the timeout period before failing. The actual wait time could be longer, particularly when multiple waits occur. - * **`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))**: The number of copies of each shard that must be active before proceeding with the operation. Set to *all* or any positive integer up to the total number of shards in the index (`number_of_replicas`+1). The default value of `1` means it waits for each primary shard to be active. - * **`_source_excludes` (Optional, string | string[])**: The source fields you want to exclude. - * **`_source_includes` (Optional, string | string[])**: The source fields you want to retrieve. - - - -## update_by_query [_update_by_query] - -Update documents. Updates documents that match the specified query. If no query is specified, performs an update on every document in the data stream or index without modifying the source, which is useful for picking up mapping changes. +### Arguments [_arguments_update] + +#### Request (object) [_request_update] + +- **`id` (string)**: A unique identifier for the document to be updated. +- **`index` (string)**: The name of the target index. By default, the index is created automatically if it doesn't exist. +- **`detect_noop` (Optional, boolean)**: If `true`, the `result` in the response is set to `noop` (no operation) when there are no changes to the document. +- **`doc` (Optional, object)**: A partial update to an existing document. If both `doc` and `script` are specified, `doc` is ignored. +- **`doc_as_upsert` (Optional, boolean)**: If `true`, use the contents of 'doc' as the value of 'upsert'. NOTE: Using ingest pipelines with `doc_as_upsert` is not supported. +- **`script` (Optional, { source, id, params, lang, options })**: The script to run to update the document. +- **`scripted_upsert` (Optional, boolean)**: If `true`, run the script whether or not the document exists. 
+- **`_source` (Optional, boolean | { excludes, includes })**: If `false`, turn off source retrieval. You can also specify a list of the fields you want to retrieve. +- **`upsert` (Optional, object)**: If the document does not already exist, the contents of 'upsert' are inserted as a new document. If the document exists, the 'script' is run. +- **`if_primary_term` (Optional, number)**: Only perform the operation if the document has this primary term. +- **`if_seq_no` (Optional, number)**: Only perform the operation if the document has this sequence number. +- **`include_source_on_error` (Optional, boolean)**: If `true`, the document source is included in the error message in case of parsing errors. +- **`lang` (Optional, string)**: The script language. +- **`refresh` (Optional, Enum(true | false | "wait_for"))**: If 'true', Elasticsearch refreshes the affected shards to make this operation visible to search. If 'wait_for', it waits for a refresh to make this operation visible to search. If 'false', it does nothing with refreshes. +- **`require_alias` (Optional, boolean)**: If `true`, the destination must be an index alias. +- **`retry_on_conflict` (Optional, number)**: The number of times the operation should be retried when a conflict occurs. +- **`routing` (Optional, string)**: A custom value used to route operations to a specific shard. +- **`timeout` (Optional, string | -1 | 0)**: The period to wait for the following operations: dynamic mapping updates and waiting for active shards. Elasticsearch waits for at least the timeout period before failing. The actual wait time could be longer, particularly when multiple waits occur. +- **`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))**: The number of copies of each shard that must be active before proceeding with the operation. Set to 'all' or any positive integer up to the total number of shards in the index (`number_of_replicas`+1). The default value of `1` means it waits for each primary shard to be active. +- **`_source_excludes` (Optional, string | string[])**: The source fields you want to exclude. +- **`_source_includes` (Optional, string | string[])**: The source fields you want to retrieve. + +## client.updateByQuery [_update_by_query] +Update documents. +Updates documents that match the specified query. +If no query is specified, performs an update on every document in the data stream or index without modifying the source, which is useful for picking up mapping changes. If the Elasticsearch security features are enabled, you must have the following index privileges for the target data stream, index, or alias: @@ -1920,33 +2083,45 @@ If the Elasticsearch security features are enabled, you must have the following You can specify the query criteria in the request URI or the request body using the same syntax as the search API. -When you submit an update by query request, Elasticsearch gets a snapshot of the data stream or index when it begins processing the request and updates matching documents using internal versioning. When the versions match, the document is updated and the version number is incremented. If a document changes between the time that the snapshot is taken and the update operation is processed, it results in a version conflict and the operation fails. You can opt to count version conflicts instead of halting and returning by setting `conflicts` to `proceed`.
Note that if you opt to count version conflicts, the operation could attempt to update more documents from the source than `max_docs` until it has successfully updated `max_docs` documents or it has gone through every document in the source query. - -::::{note} -Documents with a version equal to 0 cannot be updated using update by query because internal versioning does not support 0 as a valid version number. -:::: +When you submit an update by query request, Elasticsearch gets a snapshot of the data stream or index when it begins processing the request and updates matching documents using internal versioning. +When the versions match, the document is updated and the version number is incremented. +If a document changes between the time that the snapshot is taken and the update operation is processed, it results in a version conflict and the operation fails. +You can opt to count version conflicts instead of halting and returning by setting `conflicts` to `proceed`. +Note that if you opt to count version conflicts, the operation could attempt to update more documents from the source than `max_docs` until it has successfully updated `max_docs` documents or it has gone through every document in the source query. +NOTE: Documents with a version equal to 0 cannot be updated using update by query because internal versioning does not support 0 as a valid version number. -While processing an update by query request, Elasticsearch performs multiple search requests sequentially to find all of the matching documents. A bulk update request is performed for each batch of matching documents. Any query or update failures cause the update by query request to fail and the failures are shown in the response. Any update requests that completed successfully still stick, they are not rolled back. +While processing an update by query request, Elasticsearch performs multiple search requests sequentially to find all of the matching documents. +A bulk update request is performed for each batch of matching documents. +Any query or update failures cause the update by query request to fail and the failures are shown in the response. +Any update requests that completed successfully still stick; they are not rolled back. **Throttling update requests** -To control the rate at which update by query issues batches of update operations, you can set `requests_per_second` to any positive decimal number. This pads each batch with a wait time to throttle the rate. Set `requests_per_second` to `-1` to turn off throttling. +To control the rate at which update by query issues batches of update operations, you can set `requests_per_second` to any positive decimal number. +This pads each batch with a wait time to throttle the rate. +Set `requests_per_second` to `-1` to turn off throttling. -Throttling uses a wait time between batches so that the internal scroll requests can be given a timeout that takes the request padding into account. The padding time is the difference between the batch size divided by the `requests_per_second` and the time spent writing. By default the batch size is 1000, so if `requests_per_second` is set to `500`: +Throttling uses a wait time between batches so that the internal scroll requests can be given a timeout that takes the request padding into account. +The padding time is the difference between the batch size divided by the `requests_per_second` and the time spent writing.
+By default the batch size is 1000, so if `requests_per_second` is set to `500`: ``` target_time = 1000 / 500 per second = 2 seconds wait_time = target_time - write_time = 2 seconds - .5 seconds = 1.5 seconds ``` -Since the batch is issued as a single _bulk request, large batch sizes cause Elasticsearch to create many requests and wait before starting the next set. This is "bursty" instead of "smooth". +Since the batch is issued as a single _bulk request, large batch sizes cause Elasticsearch to create many requests and wait before starting the next set. +This is "bursty" instead of "smooth". **Slicing** -Update by query supports sliced scroll to parallelize the update process. This can improve efficiency and provide a convenient way to break the request down into smaller parts. +Update by query supports sliced scroll to parallelize the update process. +This can improve efficiency and provide a convenient way to break the request down into smaller parts. -Setting `slices` to `auto` chooses a reasonable number for most data streams and indices. This setting will use one slice per shard, up to a certain limit. If there are multiple source data streams or indices, it will choose the number of slices based on the index or backing index with the smallest number of shards. +Setting `slices` to `auto` chooses a reasonable number for most data streams and indices. +This setting will use one slice per shard, up to a certain limit. +If there are multiple source data streams or indices, it will choose the number of slices based on the index or backing index with the smallest number of shards. Adding `slices` to `_update_by_query` just automates the manual process of creating sub-requests, which means it has some quirks: @@ -1955,11 +2130,11 @@ Adding `slices` to `_update_by_query` just automates the manual process of creat * These sub-requests are individually addressable for things like cancellation and rethrottling. * Rethrottling the request with `slices` will rethrottle the unfinished sub-request proportionally. * Canceling the request with slices will cancel each sub-request. -* Due to the nature of slices each sub-request won’t get a perfectly even portion of the documents. All documents will be addressed, but some slices may be larger than others. Expect larger slices to have a more even distribution. +* Due to the nature of slices each sub-request won't get a perfectly even portion of the documents. All documents will be addressed, but some slices may be larger than others. Expect larger slices to have a more even distribution. * Parameters like `requests_per_second` and `max_docs` on a request with slices are distributed proportionally to each sub-request. Combine that with the point above about distribution being uneven and you should conclude that using `max_docs` with `slices` might not result in exactly `max_docs` documents being updated. * Each sub-request gets a slightly different snapshot of the source data stream or index though these are all taken at approximately the same time. -If you’re slicing manually or otherwise tuning automatic slicing, keep in mind that: +If you're slicing manually or otherwise tuning automatic slicing, keep in mind that: * Query performance is most efficient when the number of slices is equal to the number of shards in the index or backing index. If that number is large (for example, 500), choose a lower number as too many slices hurts performance. Setting slices higher than the number of shards generally does not improve efficiency and adds overhead. 
* Update performance scales linearly across available resources with the number of slices. @@ -1968,10898 +2143,11730 @@ Whether query or update performance dominates the runtime depends on the documen **Update the document source** -Update by query supports scripts to update the document source. As with the update API, you can set `ctx.op` to change the operation that is performed. +Update by query supports scripts to update the document source. +As with the update API, you can set `ctx.op` to change the operation that is performed. -Set `ctx.op = "noop"` if your script decides that it doesn’t have to make any changes. The update by query operation skips updating the document and increments the `noop` counter. +Set `ctx.op = "noop"` if your script decides that it doesn't have to make any changes. +The update by query operation skips updating the document and increments the `noop` counter. -Set `ctx.op = "delete"` if your script decides that the document should be deleted. The update by query operation deletes the document and increments the `deleted` counter. +Set `ctx.op = "delete"` if your script decides that the document should be deleted. +The update by query operation deletes the document and increments the `deleted` counter. -Update by query supports only `index`, `noop`, and `delete`. Setting `ctx.op` to anything else is an error. Setting any other field in `ctx` is an error. This API enables you to only modify the source of matching documents; you cannot move them. +Update by query supports only `index`, `noop`, and `delete`. +Setting `ctx.op` to anything else is an error. +Setting any other field in `ctx` is an error. +This API enables you to only modify the source of matching documents; you cannot move them. -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-update-by-query) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-update-by-query) ```ts client.updateByQuery({ index }) ``` - - -### Arguments [_arguments_39] - -* **Request (object):** - - * **`index` (string | string[])**: A list of data streams, indices, and aliases to search. It supports wildcards (`*`). To search all data streams or indices, omit this parameter or use `*` or `_all`. - * **`max_docs` (Optional, number)**: The maximum number of documents to update. - * **`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })**: The documents to update using the Query DSL. - * **`script` (Optional, { source, id, params, lang, options })**: The script to run to update the document source or metadata when updating. - * **`slice` (Optional, { field, id, max })**: Slice the request manually using the provided slice ID and total number of slices. 
- * **`conflicts` (Optional, Enum("abort" | "proceed"))**: The preferred behavior when update by query hits version conflicts: `abort` or `proceed`. - * **`allow_no_indices` (Optional, boolean)**: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. - * **`analyzer` (Optional, string)**: The analyzer to use for the query string. This parameter can be used only when the `q` query string parameter is specified. - * **`analyze_wildcard` (Optional, boolean)**: If `true`, wildcard and prefix queries are analyzed. This parameter can be used only when the `q` query string parameter is specified. - * **`default_operator` (Optional, Enum("and" | "or"))**: The default operator for query string query: `AND` or `OR`. This parameter can be used only when the `q` query string parameter is specified. - * **`df` (Optional, string)**: The field to use as default where no field prefix is given in the query string. This parameter can be used only when the `q` query string parameter is specified. - * **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: The type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. It supports a list of values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. - * **`from` (Optional, number)**: Starting offset (default: 0) - * **`ignore_unavailable` (Optional, boolean)**: If `false`, the request returns an error if it targets a missing or closed index. - * **`lenient` (Optional, boolean)**: If `true`, format-based query failures (such as providing text to a numeric field) in the query string will be ignored. This parameter can be used only when the `q` query string parameter is specified. - * **`pipeline` (Optional, string)**: The ID of the pipeline to use to preprocess incoming documents. If the index has a default ingest pipeline specified, then setting the value to `_none` disables the default ingest pipeline for this request. If a final pipeline is configured it will always run, regardless of the value of this parameter. - * **`preference` (Optional, string)**: The node or shard the operation should be performed on. It is random by default. - * **`q` (Optional, string)**: A query in the Lucene query string syntax. - * **`refresh` (Optional, boolean)**: If `true`, Elasticsearch refreshes affected shards to make the operation visible to search after the request completes. This is different than the update API’s `refresh` parameter, which causes just the shard that received the request to be refreshed. - * **`request_cache` (Optional, boolean)**: If `true`, the request cache is used for this request. It defaults to the index-level setting. - * **`requests_per_second` (Optional, float)**: The throttle for this request in sub-requests per second. - * **`routing` (Optional, string)**: A custom value used to route operations to a specific shard. - * **`scroll` (Optional, string | -1 | 0)**: The period to retain the search context for scrolling. - * **`scroll_size` (Optional, number)**: The size of the scroll request that powers the operation. 
- * **`search_timeout` (Optional, string | -1 | 0)**: An explicit timeout for each search request. By default, there is no timeout. - * **`search_type` (Optional, Enum("query_then_fetch" | "dfs_query_then_fetch"))**: The type of the search operation. Available options include `query_then_fetch` and `dfs_query_then_fetch`. - * **`slices` (Optional, number | Enum("auto"))**: The number of slices this task should be divided into. - * **`sort` (Optional, string[])**: A list of : pairs. - * **`stats` (Optional, string[])**: The specific `tag` of the request for logging and statistical purposes. - * **`terminate_after` (Optional, number)**: The maximum number of documents to collect for each shard. If a query reaches this limit, Elasticsearch terminates the query early. Elasticsearch collects documents before sorting. IMPORTANT: Use with caution. Elasticsearch applies this parameter to each shard handling the request. When possible, let Elasticsearch perform early termination automatically. Avoid specifying this parameter for requests that target data streams with backing indices across multiple data tiers. - * **`timeout` (Optional, string | -1 | 0)**: The period each update request waits for the following operations: dynamic mapping updates, waiting for active shards. By default, it is one minute. This guarantees Elasticsearch waits for at least the timeout before failing. The actual wait time could be longer, particularly when multiple waits occur. - * **`version` (Optional, boolean)**: If `true`, returns the document version as part of a hit. - * **`version_type` (Optional, boolean)**: Should the document increment the version number (internal) on hit or not (reindex) - * **`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))**: The number of shard copies that must be active before proceeding with the operation. Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). The `timeout` parameter controls how long each write request waits for unavailable shards to become available. Both work exactly the way they work in the bulk API. - * **`wait_for_completion` (Optional, boolean)**: If `true`, the request blocks until the operation is complete. If `false`, Elasticsearch performs some preflight checks, launches the request, and returns a task ID that you can use to cancel or get the status of the task. Elasticsearch creates a record of this task as a document at `.tasks/task/${taskId}`. - - - -## update_by_query_rethrottle [_update_by_query_rethrottle] - +### Arguments [_arguments_update_by_query] + +#### Request (object) [_request_update_by_query] + +- **`index` (string | string[])**: A list of data streams, indices, and aliases to search. It supports wildcards (`*`). To search all data streams or indices, omit this parameter or use `*` or `_all`. +- **`max_docs` (Optional, number)**: The maximum number of documents to update. 
+- **`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })**: The documents to update using the Query DSL. +- **`script` (Optional, { source, id, params, lang, options })**: The script to run to update the document source or metadata when updating. +- **`slice` (Optional, { field, id, max })**: Slice the request manually using the provided slice ID and total number of slices. +- **`conflicts` (Optional, Enum("abort" | "proceed"))**: The preferred behavior when update by query hits version conflicts: `abort` or `proceed`. +- **`allow_no_indices` (Optional, boolean)**: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. +- **`analyzer` (Optional, string)**: The analyzer to use for the query string. This parameter can be used only when the `q` query string parameter is specified. +- **`analyze_wildcard` (Optional, boolean)**: If `true`, wildcard and prefix queries are analyzed. This parameter can be used only when the `q` query string parameter is specified. +- **`default_operator` (Optional, Enum("and" | "or"))**: The default operator for query string query: `AND` or `OR`. This parameter can be used only when the `q` query string parameter is specified. +- **`df` (Optional, string)**: The field to use as default where no field prefix is given in the query string. This parameter can be used only when the `q` query string parameter is specified. +- **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: The type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. It supports a list of values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. +- **`from` (Optional, number)**: Skips the specified number of documents. +- **`ignore_unavailable` (Optional, boolean)**: If `false`, the request returns an error if it targets a missing or closed index. +- **`lenient` (Optional, boolean)**: If `true`, format-based query failures (such as providing text to a numeric field) in the query string will be ignored. This parameter can be used only when the `q` query string parameter is specified. +- **`pipeline` (Optional, string)**: The ID of the pipeline to use to preprocess incoming documents. If the index has a default ingest pipeline specified, then setting the value to `_none` disables the default ingest pipeline for this request. If a final pipeline is configured it will always run, regardless of the value of this parameter. 
+- **`preference` (Optional, string)**: The node or shard the operation should be performed on. It is random by default. +- **`q` (Optional, string)**: A query in the Lucene query string syntax. +- **`refresh` (Optional, boolean)**: If `true`, Elasticsearch refreshes affected shards to make the operation visible to search after the request completes. This is different from the update API's `refresh` parameter, which causes just the shard that received the request to be refreshed. +- **`request_cache` (Optional, boolean)**: If `true`, the request cache is used for this request. It defaults to the index-level setting. +- **`requests_per_second` (Optional, float)**: The throttle for this request in sub-requests per second. +- **`routing` (Optional, string)**: A custom value used to route operations to a specific shard. +- **`scroll` (Optional, string | -1 | 0)**: The period to retain the search context for scrolling. +- **`scroll_size` (Optional, number)**: The size of the scroll request that powers the operation. +- **`search_timeout` (Optional, string | -1 | 0)**: An explicit timeout for each search request. By default, there is no timeout. +- **`search_type` (Optional, Enum("query_then_fetch" | "dfs_query_then_fetch"))**: The type of the search operation. Available options include `query_then_fetch` and `dfs_query_then_fetch`. +- **`slices` (Optional, number | Enum("auto"))**: The number of slices this task should be divided into. +- **`sort` (Optional, string[])**: A comma-separated list of `<field>:<direction>` pairs. +- **`stats` (Optional, string[])**: The specific `tag` of the request for logging and statistical purposes. +- **`terminate_after` (Optional, number)**: The maximum number of documents to collect for each shard. If a query reaches this limit, Elasticsearch terminates the query early. Elasticsearch collects documents before sorting. IMPORTANT: Use with caution. Elasticsearch applies this parameter to each shard handling the request. When possible, let Elasticsearch perform early termination automatically. Avoid specifying this parameter for requests that target data streams with backing indices across multiple data tiers. +- **`timeout` (Optional, string | -1 | 0)**: The period each update request waits for the following operations: dynamic mapping updates, waiting for active shards. By default, it is one minute. This guarantees Elasticsearch waits for at least the timeout before failing. The actual wait time could be longer, particularly when multiple waits occur. +- **`version` (Optional, boolean)**: If `true`, returns the document version as part of a hit. +- **`version_type` (Optional, boolean)**: Whether the document should increment the version number (internal) on hit or not (as in reindex). +- **`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))**: The number of shard copies that must be active before proceeding with the operation. Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). The `timeout` parameter controls how long each write request waits for unavailable shards to become available. Both work exactly the way they work in the bulk API. +- **`wait_for_completion` (Optional, boolean)**: If `true`, the request blocks until the operation is complete. If `false`, Elasticsearch performs some preflight checks, launches the request, and returns a task ID that you can use to cancel or get the status of the task. Elasticsearch creates a record of this task as a document at `.tasks/task/${taskId}`.
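+
+The following TypeScript sketch is added here for illustration only (it is not part of the generated reference). It combines several of the parameters described above: a scripted update that sets `ctx.op`, `conflicts: 'proceed'` to count version conflicts, and `requests_per_second`/`slices` for throttling and parallelism. The index name `my-index-000001`, the `status` filter, and the `counter` field are illustrative placeholders.
+
+```ts
+import { Client } from '@elastic/elasticsearch'
+
+// Placeholder connection details; point this at your own cluster.
+const client = new Client({ node: 'http://localhost:9200/' })
+
+const response = await client.updateByQuery({
+  index: 'my-index-000001',          // illustrative index name
+  conflicts: 'proceed',              // count version conflicts instead of aborting
+  requests_per_second: 500,          // throttle batches (-1 turns throttling off)
+  slices: 'auto',                    // parallelize with roughly one slice per shard
+  query: { term: { status: 'active' } },
+  script: {
+    lang: 'painless',
+    // A script may set ctx.op to "noop" or "delete" instead of updating the source.
+    source: "if (ctx._source.counter >= params.max) { ctx.op = 'noop' } else { ctx._source.counter += 1 }",
+    params: { max: 10 }
+  }
+})
+
+console.log(response.updated, response.noops, response.version_conflicts)
+```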
+ +## client.updateByQueryRethrottle [_update_by_query_rethrottle] Throttle an update by query operation. -Change the number of requests per second for a particular update by query operation. Rethrottling that speeds up the query takes effect immediately but rethrotting that slows down the query takes effect after completing the current batch to prevent scroll timeouts. +Change the number of requests per second for a particular update by query operation. +Rethrottling that speeds up the query takes effect immediately, but rethrottling that slows down the query takes effect after completing the current batch to prevent scroll timeouts. -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-update-by-query) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-update-by-query-rethrottle) ```ts client.updateByQueryRethrottle({ task_id }) ``` +### Arguments [_arguments_update_by_query_rethrottle] +#### Request (object) [_request_update_by_query_rethrottle] -### Arguments [_arguments_40] - -* **Request (object):** - - * **`task_id` (string)**: The ID for the task. - * **`requests_per_second` (Optional, float)**: The throttle for this request in sub-requests per second. To turn off throttling, set it to `-1`. - - - -## async_search [_async_search] - - -### delete [_delete_2] +- **`task_id` (string)**: The ID for the task. +- **`requests_per_second` (Optional, float)**: The throttle for this request in sub-requests per second. To turn off throttling, set it to `-1`. +## client.asyncSearch.delete [_async_search.delete] Delete an async search. -If the asynchronous search is still running, it is cancelled. Otherwise, the saved search results are deleted. If the Elasticsearch security features are enabled, the deletion of a specific async search is restricted to: the authenticated user that submitted the original search request; users that have the `cancel_task` cluster privilege. +If the asynchronous search is still running, it is cancelled. +Otherwise, the saved search results are deleted. +If the Elasticsearch security features are enabled, the deletion of a specific async search is restricted to: the authenticated user that submitted the original search request; users that have the `cancel_task` cluster privilege. -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-async-search-submit) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-async-search-submit) ```ts client.asyncSearch.delete({ id }) ``` ### Arguments [_arguments_async_search.delete] -### Arguments [_arguments_41] - -* **Request (object):** - - * **`id` (string)**: A unique identifier for the async search. - - -### get [_get_2] #### Request (object) [_request_async_search.delete] - **`id` (string)**: A unique identifier for the async search. ## client.asyncSearch.get [_async_search.get] Get async search results. -Retrieve the results of a previously submitted asynchronous search request. If the Elasticsearch security features are enabled, access to the results of a specific async search is restricted to the user or API key that submitted it. +Retrieve the results of a previously submitted asynchronous search request. +If the Elasticsearch security features are enabled, access to the results of a specific async search is restricted to the user or API key that submitted it.
-[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-async-search-submit) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-async-search-submit) ```ts client.asyncSearch.get({ id }) ``` ### Arguments [_arguments_async_search.get] -### Arguments [_arguments_42] - -* **Request (object):** - - * **`id` (string)**: A unique identifier for the async search. - * **`keep_alive` (Optional, string | -1 | 0)**: Specifies how long the async search should be available in the cluster. When not specified, the `keep_alive` set with the corresponding submit async request will be used. Otherwise, it is possible to override the value and extend the validity of the request. When this period expires, the search, if still running, is cancelled. If the search is completed, its saved results are deleted. - * **`typed_keys` (Optional, boolean)**: Specify whether aggregation and suggester names should be prefixed by their respective types in the response - * **`wait_for_completion_timeout` (Optional, string | -1 | 0)**: Specifies to wait for the search to be completed up until the provided timeout. Final results will be returned if available before the timeout expires, otherwise the currently available results will be returned once the timeout expires. By default no timeout is set meaning that the currently available results will be returned without any additional wait. - - - -### status [_status] #### Request (object) [_request_async_search.get] +- **`id` (string)**: A unique identifier for the async search. +- **`keep_alive` (Optional, string | -1 | 0)**: The length of time that the async search should be available in the cluster. +When not specified, the `keep_alive` set with the corresponding submit async request will be used. +Otherwise, it is possible to override the value and extend the validity of the request. +When this period expires, the search, if still running, is cancelled. +If the search is completed, its saved results are deleted. +- **`typed_keys` (Optional, boolean)**: Specify whether aggregation and suggester names should be prefixed by their respective types in the response. +- **`wait_for_completion_timeout` (Optional, string | -1 | 0)**: Specifies how long to wait for the search to complete, up to the provided timeout. +Final results will be returned if available before the timeout expires; otherwise, the currently available results will be returned once the timeout expires. +By default no timeout is set, meaning that the currently available results will be returned without any additional wait. +## client.asyncSearch.status [_async_search.status] Get the async search status. -Get the status of a previously submitted async search request given its identifier, without retrieving search results. If the Elasticsearch security features are enabled, use of this API is restricted to the `monitoring_user` role. +Get the status of a previously submitted async search request given its identifier, without retrieving search results. +If the Elasticsearch security features are enabled, access to the status of a specific async search is restricted to: -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-async-search-submit) +* The user or API key that submitted the original async search request. +* Users that have the `monitor` cluster privilege or greater privileges.
+ +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-async-search-submit) ```ts client.asyncSearch.status({ id }) ``` +### Arguments [_arguments_async_search.status] -### Arguments [_arguments_43] - -* **Request (object):** - - * **`id` (string)**: A unique identifier for the async search. - * **`keep_alive` (Optional, string | -1 | 0)**: Specifies how long the async search needs to be available. Ongoing async searches and any saved search results are deleted after this period. - - - -### submit [_submit] +#### Request (object) [_request_async_search.status] +- **`id` (string)**: A unique identifier for the async search. +- **`keep_alive` (Optional, string | -1 | 0)**: The length of time that the async search needs to be available. +Ongoing async searches and any saved search results are deleted after this period. +## client.asyncSearch.submit [_async_search.submit] Run an async search. When the primary sort of the results is an indexed field, shards get sorted based on minimum and maximum value that they hold for that field. Partial results become available following the sort criteria that was requested. Warning: Asynchronous search does not support scroll or search requests that include only the suggest section. -By default, Elasticsearch does not allow you to store an async search response larger than 10Mb and an attempt to do this results in an error. The maximum allowed size for a stored async search response can be set by changing the `search.max_async_search_response_size` cluster level setting. +By default, Elasticsearch does not allow you to store an async search response larger than 10Mb and an attempt to do this results in an error. +The maximum allowed size for a stored async search response can be set by changing the `search.max_async_search_response_size` cluster level setting. -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-async-search-submit) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-async-search-submit) ```ts client.asyncSearch.submit({ ... }) ``` - -### Arguments [_arguments_44] - -* **Request (object):** - - * **`index` (Optional, string | string[])**: A list of index names to search; use `_all` or empty string to perform the operation on all indices - * **`aggregations` (Optional, Record)** - * **`collapse` (Optional, { field, inner_hits, max_concurrent_group_searches, collapse })** - * **`explain` (Optional, boolean)**: If true, returns detailed information about score computation as part of a hit. - * **`ext` (Optional, Record)**: Configuration of search extensions defined by Elasticsearch plugins. - * **`from` (Optional, number)**: Starting document offset. By default, you cannot page through more than 10,000 hits using the from and size parameters. To page through more hits, use the search_after parameter. - * **`highlight` (Optional, { encoder, fields })** - * **`track_total_hits` (Optional, boolean | number)**: Number of hits matching the query to count accurately. If true, the exact number of hits is returned at the cost of some performance. If false, the response does not include the total number of hits matching the query. Defaults to 10,000 hits. - * **`indices_boost` (Optional, Record[])**: Boosts the _score of documents from specified indices. - * **`docvalue_fields` (Optional, { field, format, include_unmapped }[])**: Array of wildcard (*) patterns. 
The request returns doc values for field names matching these patterns in the hits.fields property of the response. - * **`knn` (Optional, { field, query_vector, query_vector_builder, k, num_candidates, boost, filter, similarity, inner_hits, rescore_vector } | { field, query_vector, query_vector_builder, k, num_candidates, boost, filter, similarity, inner_hits, rescore_vector }[])**: Defines the approximate kNN search to run. - * **`min_score` (Optional, number)**: Minimum _score for matching documents. Documents with a lower _score are not included in the search results. - * **`post_filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })** - * **`profile` (Optional, boolean)** - * **`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })**: Defines the search definition using the Query DSL. - * **`rescore` (Optional, { window_size, query, learning_to_rank } | { window_size, query, learning_to_rank }[])** - * **`script_fields` (Optional, Record)**: Retrieve a script evaluation (based on different fields) for each hit. - * **`search_after` (Optional, number | number | string | boolean | null | User-defined value[])** - * **`size` (Optional, number)**: The number of hits to return. By default, you cannot page through more than 10,000 hits using the from and size parameters. To page through more hits, use the search_after parameter. - * **`slice` (Optional, { field, id, max })** - * **`sort` (Optional, string | { _score, _doc, _geo_distance, _script } | string | { _score, _doc, _geo_distance, _script }[])** - * **`_source` (Optional, boolean | { excludes, includes })**: Indicates which source fields are returned for matching documents. These fields are returned in the hits._source property of the search response. - * **`fields` (Optional, { field, format, include_unmapped }[])**: Array of wildcard (*) patterns. The request returns values for field names matching these patterns in the hits.fields property of the response. - * **`suggest` (Optional, { text })** - * **`terminate_after` (Optional, number)**: Maximum number of documents to collect for each shard. If a query reaches this limit, Elasticsearch terminates the query early. Elasticsearch collects documents before sorting. 
Defaults to 0, which does not terminate query execution early. - * **`timeout` (Optional, string)**: Specifies the period of time to wait for a response from each shard. If no response is received before the timeout expires, the request fails and returns an error. Defaults to no timeout. - * **`track_scores` (Optional, boolean)**: If true, calculate and return document scores, even if the scores are not used for sorting. - * **`version` (Optional, boolean)**: If true, returns document version as part of a hit. - * **`seq_no_primary_term` (Optional, boolean)**: If true, returns sequence number and primary term of the last modification of each hit. See Optimistic concurrency control. - * **`stored_fields` (Optional, string | string[])**: List of stored fields to return as part of a hit. If no fields are specified, no stored fields are included in the response. If this field is specified, the _source parameter defaults to false. You can pass _source: true to return both source fields and stored fields in the search response. - * **`pit` (Optional, { id, keep_alive })**: Limits the search to a point in time (PIT). If you provide a PIT, you cannot specify an in the request path. - * **`runtime_mappings` (Optional, Record)**: Defines one or more runtime fields in the search request. These fields take precedence over mapped fields with the same name. - * **`stats` (Optional, string[])**: Stats groups to associate with the search. Each group maintains a statistics aggregation for its associated searches. You can retrieve these stats using the indices stats API. - * **`wait_for_completion_timeout` (Optional, string | -1 | 0)**: Blocks and waits until the search is completed up to a certain timeout. When the async search completes within the timeout, the response won’t include the ID as the results are not stored in the cluster. - * **`keep_alive` (Optional, string | -1 | 0)**: Specifies how long the async search needs to be available. Ongoing async searches and any saved search results are deleted after this period. - * **`keep_on_completion` (Optional, boolean)**: If `true`, results are stored for later retrieval when the search completes within the `wait_for_completion_timeout`. - * **`allow_no_indices` (Optional, boolean)**: Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified) - * **`allow_partial_search_results` (Optional, boolean)**: Indicate if an error should be returned if there is a partial search failure or timeout - * **`analyzer` (Optional, string)**: The analyzer to use for the query string - * **`analyze_wildcard` (Optional, boolean)**: Specify whether wildcard and prefix queries should be analyzed (default: false) - * **`batched_reduce_size` (Optional, number)**: Affects how often partial results become available, which happens whenever shard results are reduced. A partial reduction is performed every time the coordinating node has received a certain number of new shard responses (5 by default). - * **`ccs_minimize_roundtrips` (Optional, boolean)**: The default value is the only supported value. 
- * **`default_operator` (Optional, Enum("and" | "or"))**: The default operator for query string query (AND or OR) - * **`df` (Optional, string)**: The field to use as default where no field prefix is given in the query string - * **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Whether to expand wildcard expression to concrete indices that are open, closed or both. - * **`ignore_throttled` (Optional, boolean)**: Whether specified concrete, expanded or aliased indices should be ignored when throttled - * **`ignore_unavailable` (Optional, boolean)**: Whether specified concrete indices should be ignored when unavailable (missing or closed) - * **`lenient` (Optional, boolean)**: Specify whether format-based query failures (such as providing text to a numeric field) should be ignored - * **`max_concurrent_shard_requests` (Optional, number)**: The number of concurrent shard requests per node this search executes concurrently. This value should be used to limit the impact of the search on the cluster in order to limit the number of concurrent shard requests - * **`preference` (Optional, string)**: Specify the node or shard the operation should be performed on (default: random) - * **`request_cache` (Optional, boolean)**: Specify if request cache should be used for this request or not, defaults to true - * **`routing` (Optional, string)**: A list of specific routing values - * **`search_type` (Optional, Enum("query_then_fetch" | "dfs_query_then_fetch"))**: Search operation type - * **`suggest_field` (Optional, string)**: Specifies which field to use for suggestions. - * **`suggest_mode` (Optional, Enum("missing" | "popular" | "always"))**: Specify suggest mode - * **`suggest_size` (Optional, number)**: How many suggestions to return in response - * **`suggest_text` (Optional, string)**: The source text for which the suggestions should be returned. - * **`typed_keys` (Optional, boolean)**: Specify whether aggregation and suggester names should be prefixed by their respective types in the response - * **`rest_total_hits_as_int` (Optional, boolean)**: Indicates whether hits.total should be rendered as an integer or an object in the rest search response - * **`_source_excludes` (Optional, string | string[])**: A list of fields to exclude from the returned _source field - * **`_source_includes` (Optional, string | string[])**: A list of fields to extract and return from the _source field - * **`q` (Optional, string)**: Query in the Lucene query string syntax - - - -## autoscaling [_autoscaling] - - -### delete_autoscaling_policy [_delete_autoscaling_policy] - +### Arguments [_arguments_async_search.submit] + +#### Request (object) [_request_async_search.submit] +- **`index` (Optional, string | string[])**: A list of index names to search; use `_all` or empty string to perform the operation on all indices +- **`aggregations` (Optional, Record)** +- **`collapse` (Optional, { field, inner_hits, max_concurrent_group_searches, collapse })** +- **`explain` (Optional, boolean)**: If true, returns detailed information about score computation as part of a hit. +- **`ext` (Optional, Record)**: Configuration of search extensions defined by Elasticsearch plugins. +- **`from` (Optional, number)**: Starting document offset. By default, you cannot page through more than 10,000 +hits using the from and size parameters. To page through more hits, use the +search_after parameter. 
+- **`highlight` (Optional, { encoder, fields })** +- **`track_total_hits` (Optional, boolean | number)**: Number of hits matching the query to count accurately. If true, the exact +number of hits is returned at the cost of some performance. If false, the +response does not include the total number of hits matching the query. +Defaults to 10,000 hits. +- **`indices_boost` (Optional, Record[])**: Boosts the _score of documents from specified indices. +- **`docvalue_fields` (Optional, { field, format, include_unmapped }[])**: Array of wildcard (*) patterns. The request returns doc values for field +names matching these patterns in the hits.fields property of the response. +- **`knn` (Optional, { field, query_vector, query_vector_builder, k, num_candidates, boost, filter, similarity, inner_hits, rescore_vector } | { field, query_vector, query_vector_builder, k, num_candidates, boost, filter, similarity, inner_hits, rescore_vector }[])**: Defines the approximate kNN search to run. +- **`min_score` (Optional, number)**: Minimum _score for matching documents. Documents with a lower _score are +not included in search results and results collected by aggregations. +- **`post_filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })** +- **`profile` (Optional, boolean)** +- **`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })**: Defines the search definition using the Query DSL. +- **`rescore` (Optional, { window_size, query, learning_to_rank } | { window_size, query, learning_to_rank }[])** +- **`script_fields` (Optional, Record)**: Retrieve a script evaluation (based on different fields) for each hit. +- **`search_after` (Optional, number | number | string | boolean | null[])** +- **`size` (Optional, number)**: The number of hits to return. By default, you cannot page through more +than 10,000 hits using the from and size parameters. To page through more +hits, use the search_after parameter. 
+- **`slice` (Optional, { field, id, max })** +- **`sort` (Optional, string | { _score, _doc, _geo_distance, _script } | string | { _score, _doc, _geo_distance, _script }[])** +- **`_source` (Optional, boolean | { excludes, includes })**: Indicates which source fields are returned for matching documents. These +fields are returned in the hits._source property of the search response. +- **`fields` (Optional, { field, format, include_unmapped }[])**: Array of wildcard (*) patterns. The request returns values for field names +matching these patterns in the hits.fields property of the response. +- **`suggest` (Optional, { text })** +- **`terminate_after` (Optional, number)**: Maximum number of documents to collect for each shard. If a query reaches this +limit, Elasticsearch terminates the query early. Elasticsearch collects documents +before sorting. Defaults to 0, which does not terminate query execution early. +- **`timeout` (Optional, string)**: Specifies the period of time to wait for a response from each shard. If no response +is received before the timeout expires, the request fails and returns an error. +Defaults to no timeout. +- **`track_scores` (Optional, boolean)**: If true, calculate and return document scores, even if the scores are not used for sorting. +- **`version` (Optional, boolean)**: If true, returns document version as part of a hit. +- **`seq_no_primary_term` (Optional, boolean)**: If true, returns sequence number and primary term of the last modification +of each hit. See Optimistic concurrency control. +- **`stored_fields` (Optional, string | string[])**: List of stored fields to return as part of a hit. If no fields are specified, +no stored fields are included in the response. If this field is specified, the _source +parameter defaults to false. You can pass _source: true to return both source fields +and stored fields in the search response. +- **`pit` (Optional, { id, keep_alive })**: Limits the search to a point in time (PIT). If you provide a PIT, you +cannot specify an in the request path. +- **`runtime_mappings` (Optional, Record)**: Defines one or more runtime fields in the search request. These fields take +precedence over mapped fields with the same name. +- **`stats` (Optional, string[])**: Stats groups to associate with the search. Each group maintains a statistics +aggregation for its associated searches. You can retrieve these stats using +the indices stats API. +- **`wait_for_completion_timeout` (Optional, string | -1 | 0)**: Blocks and waits until the search is completed up to a certain timeout. +When the async search completes within the timeout, the response won’t include the ID as the results are not stored in the cluster. +- **`keep_alive` (Optional, string | -1 | 0)**: Specifies how long the async search needs to be available. +Ongoing async searches and any saved search results are deleted after this period. +- **`keep_on_completion` (Optional, boolean)**: If `true`, results are stored for later retrieval when the search completes within the `wait_for_completion_timeout`. +- **`allow_no_indices` (Optional, boolean)**: Whether to ignore if a wildcard indices expression resolves into no concrete indices. 
(This includes `_all` string or when no indices have been specified) +- **`allow_partial_search_results` (Optional, boolean)**: Indicate if an error should be returned if there is a partial search failure or timeout +- **`analyzer` (Optional, string)**: The analyzer to use for the query string +- **`analyze_wildcard` (Optional, boolean)**: Specify whether wildcard and prefix queries should be analyzed (default: false) +- **`batched_reduce_size` (Optional, number)**: Affects how often partial results become available, which happens whenever shard results are reduced. +A partial reduction is performed every time the coordinating node has received a certain number of new shard responses (5 by default). +- **`ccs_minimize_roundtrips` (Optional, boolean)**: The default value is the only supported value. +- **`default_operator` (Optional, Enum("and" | "or"))**: The default operator for query string query (AND or OR) +- **`df` (Optional, string)**: The field to use as default where no field prefix is given in the query string +- **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Whether to expand wildcard expression to concrete indices that are open, closed or both. +- **`ignore_throttled` (Optional, boolean)**: Whether specified concrete, expanded or aliased indices should be ignored when throttled +- **`ignore_unavailable` (Optional, boolean)**: Whether specified concrete indices should be ignored when unavailable (missing or closed) +- **`lenient` (Optional, boolean)**: Specify whether format-based query failures (such as providing text to a numeric field) should be ignored +- **`max_concurrent_shard_requests` (Optional, number)**: The number of concurrent shard requests per node this search executes concurrently. This value should be used to limit the impact of the search on the cluster in order to limit the number of concurrent shard requests +- **`preference` (Optional, string)**: Specify the node or shard the operation should be performed on (default: random) +- **`request_cache` (Optional, boolean)**: Specify if request cache should be used for this request or not, defaults to true +- **`routing` (Optional, string)**: A list of specific routing values +- **`search_type` (Optional, Enum("query_then_fetch" | "dfs_query_then_fetch"))**: Search operation type +- **`suggest_field` (Optional, string)**: Specifies which field to use for suggestions. +- **`suggest_mode` (Optional, Enum("missing" | "popular" | "always"))**: Specify suggest mode +- **`suggest_size` (Optional, number)**: How many suggestions to return in response +- **`suggest_text` (Optional, string)**: The source text for which the suggestions should be returned. +- **`typed_keys` (Optional, boolean)**: Specify whether aggregation and suggester names should be prefixed by their respective types in the response +- **`rest_total_hits_as_int` (Optional, boolean)**: Indicates whether hits.total should be rendered as an integer or an object in the rest search response +- **`_source_excludes` (Optional, string | string[])**: A list of fields to exclude from the returned _source field +- **`_source_includes` (Optional, string | string[])**: A list of fields to extract and return from the _source field +- **`q` (Optional, string)**: Query in the Lucene query string syntax + +## client.autoscaling.deleteAutoscalingPolicy [_autoscaling.delete_autoscaling_policy] Delete an autoscaling policy. 
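+For example, a minimal call sketch, assuming `client` is an already-configured `Client` instance and `my_autoscaling_policy` is an existing policy (both names are illustrative):
+
+```ts
+// Removes the named policy; the optional timeout bounds how long to wait for a response.
+await client.autoscaling.deleteAutoscalingPolicy({
+  name: 'my_autoscaling_policy',
+  timeout: '30s'
+})
+```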
-::::{note} -This feature is designed for indirect use by Elasticsearch Service, Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not supported. -:::: +NOTE: This feature is designed for indirect use by Elasticsearch Service, Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not supported. - -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-autoscaling-delete-autoscaling-policy) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-autoscaling-delete-autoscaling-policy) ```ts client.autoscaling.deleteAutoscalingPolicy({ name }) ``` +### Arguments [_arguments_autoscaling.delete_autoscaling_policy] -### Arguments [_arguments_45] - -* **Request (object):** - - * **`name` (string)**: the name of the autoscaling policy - * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. - * **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. - - - -### get_autoscaling_capacity [_get_autoscaling_capacity] +#### Request (object) [_request_autoscaling.delete_autoscaling_policy] +- **`name` (string)**: the name of the autoscaling policy +- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. +- **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. +## client.autoscaling.getAutoscalingCapacity [_autoscaling.get_autoscaling_capacity] Get the autoscaling capacity. -::::{note} -This feature is designed for indirect use by Elasticsearch Service, Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not supported. -:::: +NOTE: This feature is designed for indirect use by Elasticsearch Service, Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not supported. - -This API gets the current autoscaling capacity based on the configured autoscaling policy. It will return information to size the cluster appropriately to the current workload. +This API gets the current autoscaling capacity based on the configured autoscaling policy. +It will return information to size the cluster appropriately to the current workload. The `required_capacity` is calculated as the maximum of the `required_capacity` result of all individual deciders that are enabled for the policy. The operator should verify that the `current_nodes` match the operator’s knowledge of the cluster to avoid making autoscaling decisions based on stale or incomplete information. -The response contains decider-specific information you can use to diagnose how and why autoscaling determined a certain capacity was required. This information is provided for diagnosis only. Do not use this information to make autoscaling decisions. +The response contains decider-specific information you can use to diagnose how and why autoscaling determined a certain capacity was required. +This information is provided for diagnosis only. +Do not use this information to make autoscaling decisions. 
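+A short sketch of reading the result, assuming `client` is an already-configured `Client` instance (the autoscaling capacity API groups results per policy under `policies`):
+
+```ts
+const capacity = await client.autoscaling.getAutoscalingCapacity()
+// Each policy entry reports required_capacity, current_capacity, current_nodes,
+// and per-decider details that are useful for diagnosis only.
+for (const [policyName, result] of Object.entries(capacity.policies)) {
+  console.log(policyName, result.required_capacity)
+}
+```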
-[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-autoscaling-get-autoscaling-capacity) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-autoscaling-get-autoscaling-capacity) ```ts client.autoscaling.getAutoscalingCapacity({ ... }) ``` +### Arguments [_arguments_autoscaling.get_autoscaling_capacity] -### Arguments [_arguments_46] - -* **Request (object):** - - * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. - - - -### get_autoscaling_policy [_get_autoscaling_policy] +#### Request (object) [_request_autoscaling.get_autoscaling_capacity] +- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. +## client.autoscaling.getAutoscalingPolicy [_autoscaling.get_autoscaling_policy] Get an autoscaling policy. -::::{note} -This feature is designed for indirect use by Elasticsearch Service, Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not supported. -:::: +NOTE: This feature is designed for indirect use by Elasticsearch Service, Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not supported. - -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-autoscaling-get-autoscaling-capacity) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-autoscaling-get-autoscaling-capacity) ```ts client.autoscaling.getAutoscalingPolicy({ name }) ``` +### Arguments [_arguments_autoscaling.get_autoscaling_policy] -### Arguments [_arguments_47] - -* **Request (object):** - - * **`name` (string)**: the name of the autoscaling policy - * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. - - - -### put_autoscaling_policy [_put_autoscaling_policy] +#### Request (object) [_request_autoscaling.get_autoscaling_policy] +- **`name` (string)**: the name of the autoscaling policy +- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. +## client.autoscaling.putAutoscalingPolicy [_autoscaling.put_autoscaling_policy] Create or update an autoscaling policy. -::::{note} -This feature is designed for indirect use by Elasticsearch Service, Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not supported. -:::: +NOTE: This feature is designed for indirect use by Elasticsearch Service, Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not supported. 
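+A hedged sketch of the request shape (the policy name, role, and `fixed` decider below follow the illustrative example in the Elasticsearch autoscaling docs and are not a recommended production configuration):
+
+```ts
+await client.autoscaling.putAutoscalingPolicy({
+  name: 'my_autoscaling_policy',
+  policy: {
+    roles: ['data_hot'],     // node roles the policy applies to
+    deciders: { fixed: {} }  // decider settings are cluster-specific; placeholder only
+  }
+})
+```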
- -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-autoscaling-put-autoscaling-policy) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-autoscaling-put-autoscaling-policy) ```ts client.autoscaling.putAutoscalingPolicy({ name }) ``` +### Arguments [_arguments_autoscaling.put_autoscaling_policy] -### Arguments [_arguments_48] - -* **Request (object):** - - * **`name` (string)**: the name of the autoscaling policy - * **`policy` (Optional, { roles, deciders })** - * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. - * **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. - - - -## cat [_cat] - - -### aliases [_aliases] +#### Request (object) [_request_autoscaling.put_autoscaling_policy] +- **`name` (string)**: the name of the autoscaling policy +- **`policy` (Optional, { roles, deciders })** +- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. +- **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. +## client.cat.aliases [_cat.aliases] Get aliases. -Get the cluster’s index aliases, including filter and routing information. This API does not return data stream aliases. +Get the cluster's index aliases, including filter and routing information. +This API does not return data stream aliases. -::::{important} -CAT APIs are only intended for human consumption using the command line or the Kibana console. They are not intended for use by applications. For application consumption, use the aliases API. -:::: +IMPORTANT: CAT APIs are only intended for human consumption using the command line or the Kibana console. They are not intended for use by applications. For application consumption, use the aliases API. - -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-aliases) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-cat-aliases) ```ts client.cat.aliases({ ... }) ``` +### Arguments [_arguments_cat.aliases] -### Arguments [_arguments_49] - -* **Request (object):** - - * **`name` (Optional, string | string[])**: A list of aliases to retrieve. Supports wildcards (`*`). To retrieve all aliases, omit this parameter or use `*` or `_all`. - * **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: The type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. It supports a list of values, such as `open,hidden`. - * **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for a connection to the master node. If the master node is not available before the timeout expires, the request fails and returns an error. To indicated that the request should never timeout, you can set it to `-1`. 
- - - -### allocation [_allocation] +#### Request (object) [_request_cat.aliases] +- **`name` (Optional, string | string[])**: A list of aliases to retrieve. Supports wildcards (`*`). To retrieve all aliases, omit this parameter or use `*` or `_all`. +- **`h` (Optional, string | string[])**: List of columns to appear in the response. Supports simple wildcards. +- **`s` (Optional, string | string[])**: List of columns that determine how the table should be sorted. +Sorting defaults to ascending and can be changed by setting `:asc` +or `:desc` as a suffix to the column name. +- **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: The type of index that wildcard patterns can match. +If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. +It supports a list of values, such as `open,hidden`. +- **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for a connection to the master node. +If the master node is not available before the timeout expires, the request fails and returns an error. +To indicate that the request should never timeout, you can set it to `-1`. +## client.cat.allocation [_cat.allocation] Get shard allocation information. Get a snapshot of the number of shards allocated to each data node and their disk space. -::::{important} -CAT APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. -:::: +IMPORTANT: CAT APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. - -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-allocation) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-cat-allocation) ```ts client.cat.allocation({ ... }) ``` +### Arguments [_arguments_cat.allocation] -### Arguments [_arguments_50] - -* **Request (object):** - - * **`node_id` (Optional, string | string[])**: A list of node identifiers or names used to limit the returned information. - * **`bytes` (Optional, Enum("b" | "kb" | "mb" | "gb" | "tb" | "pb"))**: The unit used to display byte values. - * **`local` (Optional, boolean)**: If `true`, the request computes the list of selected nodes from the local cluster state. If `false` the list of selected nodes are computed from the cluster state of the master node. In both cases the coordinating node will send requests for further information to each selected node. - * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. - - - -### component_templates [_component_templates] +#### Request (object) [_request_cat.allocation] +- **`node_id` (Optional, string | string[])**: A list of node identifiers or names used to limit the returned information. +- **`bytes` (Optional, Enum("b" | "kb" | "mb" | "gb" | "tb" | "pb"))**: The unit used to display byte values. +- **`h` (Optional, string | string[])**: List of columns to appear in the response. Supports simple wildcards. +- **`s` (Optional, string | string[])**: List of columns that determine how the table should be sorted. +Sorting defaults to ascending and can be changed by setting `:asc` +or `:desc` as a suffix to the column name.
+- **`local` (Optional, boolean)**: If `true`, the request computes the list of selected nodes from the +local cluster state. If `false` the list of selected nodes are computed +from the cluster state of the master node. In both cases the coordinating +node will send requests for further information to each selected node. +- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. +## client.cat.componentTemplates [_cat.component_templates] Get component templates. -Get information about component templates in a cluster. Component templates are building blocks for constructing index templates that specify index mappings, settings, and aliases. +Get information about component templates in a cluster. +Component templates are building blocks for constructing index templates that specify index mappings, settings, and aliases. -::::{important} -CAT APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the get component template API. -:::: +IMPORTANT: CAT APIs are only intended for human consumption using the command line or Kibana console. +They are not intended for use by applications. For application consumption, use the get component template API. - -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-component-templates) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-cat-component-templates) ```ts client.cat.componentTemplates({ ... }) ``` +### Arguments [_arguments_cat.component_templates] -### Arguments [_arguments_51] - -* **Request (object):** - - * **`name` (Optional, string)**: The name of the component template. It accepts wildcard expressions. If it is omitted, all component templates are returned. - * **`local` (Optional, boolean)**: If `true`, the request computes the list of selected nodes from the local cluster state. If `false` the list of selected nodes are computed from the cluster state of the master node. In both cases the coordinating node will send requests for further information to each selected node. - * **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for a connection to the master node. - - - -### count [_count_2] +#### Request (object) [_request_cat.component_templates] +- **`name` (Optional, string)**: The name of the component template. +It accepts wildcard expressions. +If it is omitted, all component templates are returned. +- **`h` (Optional, string | string[])**: List of columns to appear in the response. Supports simple wildcards. +- **`s` (Optional, string | string[])**: List of columns that determine how the table should be sorted. +Sorting defaults to ascending and can be changed by setting `:asc` +or `:desc` as a suffix to the column name. +- **`local` (Optional, boolean)**: If `true`, the request computes the list of selected nodes from the +local cluster state. If `false` the list of selected nodes are computed +from the cluster state of the master node. In both cases the coordinating +node will send requests for further information to each selected node. +- **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for a connection to the master node. +## client.cat.count [_cat.count] Get a document count. -Get quick access to a document count for a data stream, an index, or an entire cluster. 
The document count only includes live documents, not deleted documents which have not yet been removed by the merge process. +Get quick access to a document count for a data stream, an index, or an entire cluster. +The document count only includes live documents, not deleted documents which have not yet been removed by the merge process. -::::{important} -CAT APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the count API. -:::: +IMPORTANT: CAT APIs are only intended for human consumption using the command line or Kibana console. +They are not intended for use by applications. For application consumption, use the count API. - -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-count) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-cat-count) ```ts client.cat.count({ ... }) ``` +### Arguments [_arguments_cat.count] -### Arguments [_arguments_52] - -* **Request (object):** - - * **`index` (Optional, string | string[])**: A list of data streams, indices, and aliases used to limit the request. It supports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`. - - - -### fielddata [_fielddata] +#### Request (object) [_request_cat.count] +- **`index` (Optional, string | string[])**: A list of data streams, indices, and aliases used to limit the request. +It supports wildcards (`*`). +To target all data streams and indices, omit this parameter or use `*` or `_all`. +- **`h` (Optional, string | string[])**: List of columns to appear in the response. Supports simple wildcards. +- **`s` (Optional, string | string[])**: List of columns that determine how the table should be sorted. +Sorting defaults to ascending and can be changed by setting `:asc` +or `:desc` as a suffix to the column name. +## client.cat.fielddata [_cat.fielddata] Get field data cache information. Get the amount of heap memory currently used by the field data cache on every data node in the cluster. -::::{important} -cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the nodes stats API. -:::: +IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. +They are not intended for use by applications. For application consumption, use the nodes stats API. - -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-fielddata) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-cat-fielddata) ```ts client.cat.fielddata({ ... }) ``` +### Arguments [_arguments_cat.fielddata] -### Arguments [_arguments_53] - -* **Request (object):** - - * **`fields` (Optional, string | string[])**: List of fields used to limit returned information. To retrieve all fields, omit this parameter. - * **`bytes` (Optional, Enum("b" | "kb" | "mb" | "gb" | "tb" | "pb"))**: The unit used to display byte values. - - - -### health [_health] +#### Request (object) [_request_cat.fielddata] +- **`fields` (Optional, string | string[])**: List of fields used to limit returned information. +To retrieve all fields, omit this parameter. +- **`bytes` (Optional, Enum("b" | "kb" | "mb" | "gb" | "tb" | "pb"))**: The unit used to display byte values. 
+- **`h` (Optional, string | string[])**: List of columns to appear in the response. Supports simple wildcards. +- **`s` (Optional, string | string[])**: List of columns that determine how the table should be sorted. +Sorting defaults to ascending and can be changed by setting `:asc` +or `:desc` as a suffix to the column name. +## client.cat.health [_cat.health] Get the cluster health status. -::::{important} -CAT APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the cluster health API. This API is often used to check malfunctioning clusters. To help you track cluster health alongside log files and alerting systems, the API returns timestamps in two formats: `HH:MM:SS`, which is human-readable but includes no date information; `Unix epoch time`, which is machine-sortable and includes date information. The latter format is useful for cluster recoveries that take multiple days. You can use the cat health API to verify cluster health across multiple nodes. You also can use the API to track the recovery of a large cluster over a longer period of time. -:::: +IMPORTANT: CAT APIs are only intended for human consumption using the command line or Kibana console. +They are not intended for use by applications. For application consumption, use the cluster health API. +This API is often used to check malfunctioning clusters. +To help you track cluster health alongside log files and alerting systems, the API returns timestamps in two formats: +`HH:MM:SS`, which is human-readable but includes no date information; +`Unix epoch time`, which is machine-sortable and includes date information. +The latter format is useful for cluster recoveries that take multiple days. +You can use the cat health API to verify cluster health across multiple nodes. +You also can use the API to track the recovery of a large cluster over a longer period of time. - -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-health) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-cat-health) ```ts client.cat.health({ ... }) ``` +### Arguments [_arguments_cat.health] -### Arguments [_arguments_54] - -* **Request (object):** - - * **`time` (Optional, Enum("nanos" | "micros" | "ms" | "s" | "m" | "h" | "d"))**: The unit used to display time values. - * **`ts` (Optional, boolean)**: If true, returns `HH:MM:SS` and Unix epoch timestamps. - - - -### help [_help] +#### Request (object) [_request_cat.health] +- **`time` (Optional, Enum("nanos" | "micros" | "ms" | "s" | "m" | "h" | "d"))**: The unit used to display time values. +- **`ts` (Optional, boolean)**: If true, returns `HH:MM:SS` and Unix epoch timestamps. +- **`h` (Optional, string | string[])**: List of columns to appear in the response. Supports simple wildcards. +- **`s` (Optional, string | string[])**: List of columns that determine how the table should be sorted. +Sorting defaults to ascending and can be changed by setting `:asc` +or `:desc` as a suffix to the column name. +## client.cat.help [_cat.help] Get CAT help. Get help for the CAT APIs. -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-cat) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/group/endpoint-cat) ```ts client.cat.help() ``` -### indices [_indices] - +## client.cat.indices [_cat.indices] Get index information. 
Get high-level information about indices in a cluster, including backing indices for data streams. -Use this request to get the following information for each index in a cluster: - shard count - document count - deleted document count - primary store size - total store size of all shards, including shard replicas +Use this request to get the following information for each index in a cluster: +- shard count +- document count +- deleted document count +- primary store size +- total store size of all shards, including shard replicas -These metrics are retrieved directly from Lucene, which Elasticsearch uses internally to power indexing and search. As a result, all document counts include hidden nested documents. To get an accurate count of Elasticsearch documents, use the cat count or count APIs. +These metrics are retrieved directly from Lucene, which Elasticsearch uses internally to power indexing and search. As a result, all document counts include hidden nested documents. +To get an accurate count of Elasticsearch documents, use the cat count or count APIs. -CAT APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use an index endpoint. +CAT APIs are only intended for human consumption using the command line or Kibana console. +They are not intended for use by applications. For application consumption, use an index endpoint. -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-indices) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-cat-indices) ```ts client.cat.indices({ ... }) ``` - -### Arguments [_arguments_55] - -* **Request (object):** - - * **`index` (Optional, string | string[])**: List of data streams, indices, and aliases used to limit the request. Supports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`. - * **`bytes` (Optional, Enum("b" | "kb" | "mb" | "gb" | "tb" | "pb"))**: The unit used to display byte values. - * **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: The type of index that wildcard patterns can match. - * **`health` (Optional, Enum("green" | "yellow" | "red"))**: The health status used to limit returned indices. By default, the response includes indices of any health status. - * **`include_unloaded_segments` (Optional, boolean)**: If true, the response includes information from segments that are not loaded into memory. - * **`pri` (Optional, boolean)**: If true, the response only includes information from primary shards. - * **`time` (Optional, Enum("nanos" | "micros" | "ms" | "s" | "m" | "h" | "d"))**: The unit used to display time values. - * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. - - - -### master [_master] - +### Arguments [_arguments_cat.indices] + +#### Request (object) [_request_cat.indices] +- **`index` (Optional, string | string[])**: List of data streams, indices, and aliases used to limit the request. +Supports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`. +- **`bytes` (Optional, Enum("b" | "kb" | "mb" | "gb" | "tb" | "pb"))**: The unit used to display byte values. 
+- **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: The type of index that wildcard patterns can match. +- **`health` (Optional, Enum("green" | "yellow" | "red"))**: The health status used to limit returned indices. By default, the response includes indices of any health status. +- **`include_unloaded_segments` (Optional, boolean)**: If true, the response includes information from segments that are not loaded into memory. +- **`pri` (Optional, boolean)**: If true, the response only includes information from primary shards. +- **`time` (Optional, Enum("nanos" | "micros" | "ms" | "s" | "m" | "h" | "d"))**: The unit used to display time values. +- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. +- **`h` (Optional, string | string[])**: List of columns to appear in the response. Supports simple wildcards. +- **`s` (Optional, string | string[])**: List of columns that determine how the table should be sorted. +Sorting defaults to ascending and can be changed by setting `:asc` +or `:desc` as a suffix to the column name. + +## client.cat.master [_cat.master] Get master node information. Get information about the master node, including the ID, bound IP address, and name. -::::{important} -cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the nodes info API. -:::: +IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the nodes info API. - -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-master) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-cat-master) ```ts client.cat.master({ ... }) ``` +### Arguments [_arguments_cat.master] -### Arguments [_arguments_56] - -* **Request (object):** - - * **`local` (Optional, boolean)**: If `true`, the request computes the list of selected nodes from the local cluster state. If `false` the list of selected nodes are computed from the cluster state of the master node. In both cases the coordinating node will send requests for further information to each selected node. - * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. - - - -### ml_data_frame_analytics [_ml_data_frame_analytics] +#### Request (object) [_request_cat.master] +- **`h` (Optional, string | string[])**: List of columns to appear in the response. Supports simple wildcards. +- **`s` (Optional, string | string[])**: List of columns that determine how the table should be sorted. +Sorting defaults to ascending and can be changed by setting `:asc` +or `:desc` as a suffix to the column name. +- **`local` (Optional, boolean)**: If `true`, the request computes the list of selected nodes from the +local cluster state. If `false` the list of selected nodes are computed +from the cluster state of the master node. In both cases the coordinating +node will send requests for further information to each selected node. +- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. +## client.cat.mlDataFrameAnalytics [_cat.ml_data_frame_analytics] Get data frame analytics jobs. 
Get configuration and usage information about data frame analytics jobs. -::::{important} -CAT APIs are only intended for human consumption using the Kibana console or command line. They are not intended for use by applications. For application consumption, use the get data frame analytics jobs statistics API. -:::: +IMPORTANT: CAT APIs are only intended for human consumption using the Kibana +console or command line. They are not intended for use by applications. For +application consumption, use the get data frame analytics jobs statistics API. - -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-ml-data-frame-analytics) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-cat-ml-data-frame-analytics) ```ts client.cat.mlDataFrameAnalytics({ ... }) ``` +### Arguments [_arguments_cat.ml_data_frame_analytics] -### Arguments [_arguments_57] - -* **Request (object):** - - * **`id` (Optional, string)**: The ID of the data frame analytics to fetch - * **`allow_no_match` (Optional, boolean)**: Whether to ignore if a wildcard expression matches no configs. (This includes `_all` string or when no configs have been specified) - * **`bytes` (Optional, Enum("b" | "kb" | "mb" | "gb" | "tb" | "pb"))**: The unit in which to display byte values - * **`h` (Optional, Enum("assignment_explanation" | "create_time" | "description" | "dest_index" | "failure_reason" | "id" | "model_memory_limit" | "node.address" | "node.ephemeral_id" | "node.id" | "node.name" | "progress" | "source_index" | "state" | "type" | "version") | Enum("assignment_explanation" | "create_time" | "description" | "dest_index" | "failure_reason" | "id" | "model_memory_limit" | "node.address" | "node.ephemeral_id" | "node.id" | "node.name" | "progress" | "source_index" | "state" | "type" | "version")[])**: List of column names to display. - * **`s` (Optional, Enum("assignment_explanation" | "create_time" | "description" | "dest_index" | "failure_reason" | "id" | "model_memory_limit" | "node.address" | "node.ephemeral_id" | "node.id" | "node.name" | "progress" | "source_index" | "state" | "type" | "version") | Enum("assignment_explanation" | "create_time" | "description" | "dest_index" | "failure_reason" | "id" | "model_memory_limit" | "node.address" | "node.ephemeral_id" | "node.id" | "node.name" | "progress" | "source_index" | "state" | "type" | "version")[])**: List of column names or column aliases used to sort the response. - * **`time` (Optional, Enum("nanos" | "micros" | "ms" | "s" | "m" | "h" | "d"))**: Unit used to display time values. - - - -### ml_datafeeds [_ml_datafeeds] +#### Request (object) [_request_cat.ml_data_frame_analytics] +- **`id` (Optional, string)**: The ID of the data frame analytics to fetch +- **`allow_no_match` (Optional, boolean)**: Whether to ignore if a wildcard expression matches no configs. 
(This includes `_all` string or when no configs have been specified) +- **`bytes` (Optional, Enum("b" | "kb" | "mb" | "gb" | "tb" | "pb"))**: The unit in which to display byte values +- **`h` (Optional, Enum("assignment_explanation" | "create_time" | "description" | "dest_index" | "failure_reason" | "id" | "model_memory_limit" | "node.address" | "node.ephemeral_id" | "node.id" | "node.name" | "progress" | "source_index" | "state" | "type" | "version") | Enum("assignment_explanation" | "create_time" | "description" | "dest_index" | "failure_reason" | "id" | "model_memory_limit" | "node.address" | "node.ephemeral_id" | "node.id" | "node.name" | "progress" | "source_index" | "state" | "type" | "version")[])**: List of column names to display. +- **`s` (Optional, Enum("assignment_explanation" | "create_time" | "description" | "dest_index" | "failure_reason" | "id" | "model_memory_limit" | "node.address" | "node.ephemeral_id" | "node.id" | "node.name" | "progress" | "source_index" | "state" | "type" | "version") | Enum("assignment_explanation" | "create_time" | "description" | "dest_index" | "failure_reason" | "id" | "model_memory_limit" | "node.address" | "node.ephemeral_id" | "node.id" | "node.name" | "progress" | "source_index" | "state" | "type" | "version")[])**: List of column names or column aliases used to sort the +response. +- **`time` (Optional, Enum("nanos" | "micros" | "ms" | "s" | "m" | "h" | "d"))**: Unit used to display time values. +## client.cat.mlDatafeeds [_cat.ml_datafeeds] Get datafeeds. -Get configuration and usage information about datafeeds. This API returns a maximum of 10,000 datafeeds. If the Elasticsearch security features are enabled, you must have `monitor_ml`, `monitor`, `manage_ml`, or `manage` cluster privileges to use this API. +Get configuration and usage information about datafeeds. +This API returns a maximum of 10,000 datafeeds. +If the Elasticsearch security features are enabled, you must have `monitor_ml`, `monitor`, `manage_ml`, or `manage` +cluster privileges to use this API. -::::{important} -CAT APIs are only intended for human consumption using the Kibana console or command line. They are not intended for use by applications. For application consumption, use the get datafeed statistics API. -:::: +IMPORTANT: CAT APIs are only intended for human consumption using the Kibana +console or command line. They are not intended for use by applications. For +application consumption, use the get datafeed statistics API. - -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-ml-datafeeds) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-cat-ml-datafeeds) ```ts client.cat.mlDatafeeds({ ... }) ``` +### Arguments [_arguments_cat.ml_datafeeds] -### Arguments [_arguments_58] - -* **Request (object):** - - * **`datafeed_id` (Optional, string)**: A numerical character string that uniquely identifies the datafeed. - * **`allow_no_match` (Optional, boolean)**: Specifies what to do when the request: +#### Request (object) [_request_cat.ml_datafeeds] +- **`datafeed_id` (Optional, string)**: A numerical character string that uniquely identifies the datafeed. +- **`allow_no_match` (Optional, boolean)**: Specifies what to do when the request: * Contains wildcard expressions and there are no datafeeds that match. * Contains the `_all` string or no identifiers and there are no matches. * Contains wildcard expressions and there are only partial matches. 
-If `true`, the API returns an empty datafeeds array when there are no matches and the subset of results when there are partial matches. If `false`, the API returns a 404 status code when there are no matches or only partial matches. ** *`h` (Optional, Enum("ae" | "bc" | "id" | "na" | "ne" | "ni" | "nn" | "sba" | "sc" | "seah" | "st" | "s") | Enum("ae" | "bc" | "id" | "na" | "ne" | "ni" | "nn" | "sba" | "sc" | "seah" | "st" | "s")[])**: List of column names to display. *** *`s` (Optional, Enum("ae" | "bc" | "id" | "na" | "ne" | "ni" | "nn" | "sba" | "sc" | "seah" | "st" | "s") | Enum("ae" | "bc" | "id" | "na" | "ne" | "ni" | "nn" | "sba" | "sc" | "seah" | "st" | "s")[])**: List of column names or column aliases used to sort the response. ** *`time` (Optional, Enum("nanos" | "micros" | "ms" | "s" | "m" | "h" | "d"))**: The unit used to display time values. - - -### ml_jobs [_ml_jobs] +If `true`, the API returns an empty datafeeds array when there are no matches and the subset of results when +there are partial matches. If `false`, the API returns a 404 status code when there are no matches or only +partial matches. +- **`h` (Optional, Enum("ae" | "bc" | "id" | "na" | "ne" | "ni" | "nn" | "sba" | "sc" | "seah" | "st" | "s") | Enum("ae" | "bc" | "id" | "na" | "ne" | "ni" | "nn" | "sba" | "sc" | "seah" | "st" | "s")[])**: List of column names to display. +- **`s` (Optional, Enum("ae" | "bc" | "id" | "na" | "ne" | "ni" | "nn" | "sba" | "sc" | "seah" | "st" | "s") | Enum("ae" | "bc" | "id" | "na" | "ne" | "ni" | "nn" | "sba" | "sc" | "seah" | "st" | "s")[])**: List of column names or column aliases used to sort the response. +- **`time` (Optional, Enum("nanos" | "micros" | "ms" | "s" | "m" | "h" | "d"))**: The unit used to display time values. +## client.cat.mlJobs [_cat.ml_jobs] Get anomaly detection jobs. -Get configuration and usage information for anomaly detection jobs. This API returns a maximum of 10,000 jobs. If the Elasticsearch security features are enabled, you must have `monitor_ml`, `monitor`, `manage_ml`, or `manage` cluster privileges to use this API. - -::::{important} -CAT APIs are only intended for human consumption using the Kibana console or command line. They are not intended for use by applications. For application consumption, use the get anomaly detection job statistics API. -:::: +Get configuration and usage information for anomaly detection jobs. +This API returns a maximum of 10,000 jobs. +If the Elasticsearch security features are enabled, you must have `monitor_ml`, +`monitor`, `manage_ml`, or `manage` cluster privileges to use this API. +IMPORTANT: CAT APIs are only intended for human consumption using the Kibana +console or command line. They are not intended for use by applications. For +application consumption, use the get anomaly detection job statistics API. -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-ml-jobs) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-cat-ml-jobs) ```ts client.cat.mlJobs({ ... }) ``` +### Arguments [_arguments_cat.ml_jobs] -### Arguments [_arguments_59] - -* **Request (object):** - - * **`job_id` (Optional, string)**: Identifier for the anomaly detection job. - * **`allow_no_match` (Optional, boolean)**: Specifies what to do when the request: +#### Request (object) [_request_cat.ml_jobs] +- **`job_id` (Optional, string)**: Identifier for the anomaly detection job. 
+- **`allow_no_match` (Optional, boolean)**: Specifies what to do when the request: * Contains wildcard expressions and there are no jobs that match. * Contains the `_all` string or no identifiers and there are no matches. * Contains wildcard expressions and there are only partial matches. -If `true`, the API returns an empty jobs array when there are no matches and the subset of results when there are partial matches. If `false`, the API returns a 404 status code when there are no matches or only partial matches. - -```json -`bytes` (Optional, Enum("b" | "kb" | "mb" | "gb" | "tb" | "pb")): The unit used to display byte values. `h` (Optional, Enum("assignment_explanation" | "buckets.count" | "buckets.time.exp_avg" | "buckets.time.exp_avg_hour" | "buckets.time.max" | "buckets.time.min" | "buckets.time.total" | "data.buckets" | "data.earliest_record" | "data.empty_buckets" | "data.input_bytes" | "data.input_fields" | "data.input_records" | "data.invalid_dates" | "data.last" | "data.last_empty_bucket" | "data.last_sparse_bucket" | "data.latest_record" | "data.missing_fields" | "data.out_of_order_timestamps" | "data.processed_fields" | "data.processed_records" | "data.sparse_buckets" | "forecasts.memory.avg" | "forecasts.memory.max" | "forecasts.memory.min" | "forecasts.memory.total" | "forecasts.records.avg" | "forecasts.records.max" | "forecasts.records.min" | "forecasts.records.total" | "forecasts.time.avg" | "forecasts.time.max" | "forecasts.time.min" | "forecasts.time.total" | "forecasts.total" | "id" | "model.bucket_allocation_failures" | "model.by_fields" | "model.bytes" | "model.bytes_exceeded" | "model.categorization_status" | "model.categorized_doc_count" | "model.dead_category_count" | "model.failed_category_count" | "model.frequent_category_count" | "model.log_time" | "model.memory_limit" | "model.memory_status" | "model.over_fields" | "model.partition_fields" | "model.rare_category_count" | "model.timestamp" | "model.total_category_count" | "node.address" | "node.ephemeral_id" | "node.id" | "node.name" | "opened_time" | "state") | Enum("assignment_explanation" | "buckets.count" | "buckets.time.exp_avg" | "buckets.time.exp_avg_hour" | "buckets.time.max" | "buckets.time.min" | "buckets.time.total" | "data.buckets" | "data.earliest_record" | "data.empty_buckets" | "data.input_bytes" | "data.input_fields" | "data.input_records" | "data.invalid_dates" | "data.last" | "data.last_empty_bucket" | "data.last_sparse_bucket" | "data.latest_record" | "data.missing_fields" | "data.out_of_order_timestamps" | "data.processed_fields" | "data.processed_records" | "data.sparse_buckets" | "forecasts.memory.avg" | "forecasts.memory.max" | "forecasts.memory.min" | "forecasts.memory.total" | "forecasts.records.avg" | "forecasts.records.max" | "forecasts.records.min" | "forecasts.records.total" | "forecasts.time.avg" | "forecasts.time.max" | "forecasts.time.min" | "forecasts.time.total" | "forecasts.total" | "id" | "model.bucket_allocation_failures" | "model.by_fields" | "model.bytes" | "model.bytes_exceeded" | "model.categorization_status" | "model.categorized_doc_count" | "model.dead_category_count" | "model.failed_category_count" | "model.frequent_category_count" | "model.log_time" | "model.memory_limit" | "model.memory_status" | "model.over_fields" | "model.partition_fields" | "model.rare_category_count" | "model.timestamp" | "model.total_category_count" | "node.address" | "node.ephemeral_id" | "node.id" | "node.name" | "opened_time" | "state")[]): List of column names to display. 
`s` (Optional, Enum("assignment_explanation" | "buckets.count" | "buckets.time.exp_avg" | "buckets.time.exp_avg_hour" | "buckets.time.max" | "buckets.time.min" | "buckets.time.total" | "data.buckets" | "data.earliest_record" | "data.empty_buckets" | "data.input_bytes" | "data.input_fields" | "data.input_records" | "data.invalid_dates" | "data.last" | "data.last_empty_bucket" | "data.last_sparse_bucket" | "data.latest_record" | "data.missing_fields" | "data.out_of_order_timestamps" | "data.processed_fields" | "data.processed_records" | "data.sparse_buckets" | "forecasts.memory.avg" | "forecasts.memory.max" | "forecasts.memory.min" | "forecasts.memory.total" | "forecasts.records.avg" | "forecasts.records.max" | "forecasts.records.min" | "forecasts.records.total" | "forecasts.time.avg" | "forecasts.time.max" | "forecasts.time.min" | "forecasts.time.total" | "forecasts.total" | "id" | "model.bucket_allocation_failures" | "model.by_fields" | "model.bytes" | "model.bytes_exceeded" | "model.categorization_status" | "model.categorized_doc_count" | "model.dead_category_count" | "model.failed_category_count" | "model.frequent_category_count" | "model.log_time" | "model.memory_limit" | "model.memory_status" | "model.over_fields" | "model.partition_fields" | "model.rare_category_count" | "model.timestamp" | "model.total_category_count" | "node.address" | "node.ephemeral_id" | "node.id" | "node.name" | "opened_time" | "state") | Enum("assignment_explanation" | "buckets.count" | "buckets.time.exp_avg" | "buckets.time.exp_avg_hour" | "buckets.time.max" | "buckets.time.min" | "buckets.time.total" | "data.buckets" | "data.earliest_record" | "data.empty_buckets" | "data.input_bytes" | "data.input_fields" | "data.input_records" | "data.invalid_dates" | "data.last" | "data.last_empty_bucket" | "data.last_sparse_bucket" | "data.latest_record" | "data.missing_fields" | "data.out_of_order_timestamps" | "data.processed_fields" | "data.processed_records" | "data.sparse_buckets" | "forecasts.memory.avg" | "forecasts.memory.max" | "forecasts.memory.min" | "forecasts.memory.total" | "forecasts.records.avg" | "forecasts.records.max" | "forecasts.records.min" | "forecasts.records.total" | "forecasts.time.avg" | "forecasts.time.max" | "forecasts.time.min" | "forecasts.time.total" | "forecasts.total" | "id" | "model.bucket_allocation_failures" | "model.by_fields" | "model.bytes" | "model.bytes_exceeded" | "model.categorization_status" | "model.categorized_doc_count" | "model.dead_category_count" | "model.failed_category_count" | "model.frequent_category_count" | "model.log_time" | "model.memory_limit" | "model.memory_status" | "model.over_fields" | "model.partition_fields" | "model.rare_category_count" | "model.timestamp" | "model.total_category_count" | "node.address" | "node.ephemeral_id" | "node.id" | "node.name" | "opened_time" | "state")[]): List of column names or column aliases used to sort the response. `time` (Optional, Enum("nanos" | "micros" | "ms" | "s" | "m" | "h" | "d")): The unit used to display time values. -``` - -### ml_trained_models [_ml_trained_models] +If `true`, the API returns an empty jobs array when there are no matches and the subset of results when there +are partial matches. If `false`, the API returns a 404 status code when there are no matches or only partial +matches. +- **`bytes` (Optional, Enum("b" | "kb" | "mb" | "gb" | "tb" | "pb"))**: The unit used to display byte values. 
+- **`h` (Optional, Enum("assignment_explanation" | "buckets.count" | "buckets.time.exp_avg" | "buckets.time.exp_avg_hour" | "buckets.time.max" | "buckets.time.min" | "buckets.time.total" | "data.buckets" | "data.earliest_record" | "data.empty_buckets" | "data.input_bytes" | "data.input_fields" | "data.input_records" | "data.invalid_dates" | "data.last" | "data.last_empty_bucket" | "data.last_sparse_bucket" | "data.latest_record" | "data.missing_fields" | "data.out_of_order_timestamps" | "data.processed_fields" | "data.processed_records" | "data.sparse_buckets" | "forecasts.memory.avg" | "forecasts.memory.max" | "forecasts.memory.min" | "forecasts.memory.total" | "forecasts.records.avg" | "forecasts.records.max" | "forecasts.records.min" | "forecasts.records.total" | "forecasts.time.avg" | "forecasts.time.max" | "forecasts.time.min" | "forecasts.time.total" | "forecasts.total" | "id" | "model.bucket_allocation_failures" | "model.by_fields" | "model.bytes" | "model.bytes_exceeded" | "model.categorization_status" | "model.categorized_doc_count" | "model.dead_category_count" | "model.failed_category_count" | "model.frequent_category_count" | "model.log_time" | "model.memory_limit" | "model.memory_status" | "model.over_fields" | "model.partition_fields" | "model.rare_category_count" | "model.timestamp" | "model.total_category_count" | "node.address" | "node.ephemeral_id" | "node.id" | "node.name" | "opened_time" | "state") | Enum("assignment_explanation" | "buckets.count" | "buckets.time.exp_avg" | "buckets.time.exp_avg_hour" | "buckets.time.max" | "buckets.time.min" | "buckets.time.total" | "data.buckets" | "data.earliest_record" | "data.empty_buckets" | "data.input_bytes" | "data.input_fields" | "data.input_records" | "data.invalid_dates" | "data.last" | "data.last_empty_bucket" | "data.last_sparse_bucket" | "data.latest_record" | "data.missing_fields" | "data.out_of_order_timestamps" | "data.processed_fields" | "data.processed_records" | "data.sparse_buckets" | "forecasts.memory.avg" | "forecasts.memory.max" | "forecasts.memory.min" | "forecasts.memory.total" | "forecasts.records.avg" | "forecasts.records.max" | "forecasts.records.min" | "forecasts.records.total" | "forecasts.time.avg" | "forecasts.time.max" | "forecasts.time.min" | "forecasts.time.total" | "forecasts.total" | "id" | "model.bucket_allocation_failures" | "model.by_fields" | "model.bytes" | "model.bytes_exceeded" | "model.categorization_status" | "model.categorized_doc_count" | "model.dead_category_count" | "model.failed_category_count" | "model.frequent_category_count" | "model.log_time" | "model.memory_limit" | "model.memory_status" | "model.over_fields" | "model.partition_fields" | "model.rare_category_count" | "model.timestamp" | "model.total_category_count" | "node.address" | "node.ephemeral_id" | "node.id" | "node.name" | "opened_time" | "state")[])**: List of column names to display. 
+- **`s` (Optional, Enum("assignment_explanation" | "buckets.count" | "buckets.time.exp_avg" | "buckets.time.exp_avg_hour" | "buckets.time.max" | "buckets.time.min" | "buckets.time.total" | "data.buckets" | "data.earliest_record" | "data.empty_buckets" | "data.input_bytes" | "data.input_fields" | "data.input_records" | "data.invalid_dates" | "data.last" | "data.last_empty_bucket" | "data.last_sparse_bucket" | "data.latest_record" | "data.missing_fields" | "data.out_of_order_timestamps" | "data.processed_fields" | "data.processed_records" | "data.sparse_buckets" | "forecasts.memory.avg" | "forecasts.memory.max" | "forecasts.memory.min" | "forecasts.memory.total" | "forecasts.records.avg" | "forecasts.records.max" | "forecasts.records.min" | "forecasts.records.total" | "forecasts.time.avg" | "forecasts.time.max" | "forecasts.time.min" | "forecasts.time.total" | "forecasts.total" | "id" | "model.bucket_allocation_failures" | "model.by_fields" | "model.bytes" | "model.bytes_exceeded" | "model.categorization_status" | "model.categorized_doc_count" | "model.dead_category_count" | "model.failed_category_count" | "model.frequent_category_count" | "model.log_time" | "model.memory_limit" | "model.memory_status" | "model.over_fields" | "model.partition_fields" | "model.rare_category_count" | "model.timestamp" | "model.total_category_count" | "node.address" | "node.ephemeral_id" | "node.id" | "node.name" | "opened_time" | "state") | Enum("assignment_explanation" | "buckets.count" | "buckets.time.exp_avg" | "buckets.time.exp_avg_hour" | "buckets.time.max" | "buckets.time.min" | "buckets.time.total" | "data.buckets" | "data.earliest_record" | "data.empty_buckets" | "data.input_bytes" | "data.input_fields" | "data.input_records" | "data.invalid_dates" | "data.last" | "data.last_empty_bucket" | "data.last_sparse_bucket" | "data.latest_record" | "data.missing_fields" | "data.out_of_order_timestamps" | "data.processed_fields" | "data.processed_records" | "data.sparse_buckets" | "forecasts.memory.avg" | "forecasts.memory.max" | "forecasts.memory.min" | "forecasts.memory.total" | "forecasts.records.avg" | "forecasts.records.max" | "forecasts.records.min" | "forecasts.records.total" | "forecasts.time.avg" | "forecasts.time.max" | "forecasts.time.min" | "forecasts.time.total" | "forecasts.total" | "id" | "model.bucket_allocation_failures" | "model.by_fields" | "model.bytes" | "model.bytes_exceeded" | "model.categorization_status" | "model.categorized_doc_count" | "model.dead_category_count" | "model.failed_category_count" | "model.frequent_category_count" | "model.log_time" | "model.memory_limit" | "model.memory_status" | "model.over_fields" | "model.partition_fields" | "model.rare_category_count" | "model.timestamp" | "model.total_category_count" | "node.address" | "node.ephemeral_id" | "node.id" | "node.name" | "opened_time" | "state")[])**: List of column names or column aliases used to sort the response. +- **`time` (Optional, Enum("nanos" | "micros" | "ms" | "s" | "m" | "h" | "d"))**: The unit used to display time values. +## client.cat.mlTrainedModels [_cat.ml_trained_models] Get trained models. Get configuration and usage information about inference trained models. -::::{important} -CAT APIs are only intended for human consumption using the Kibana console or command line. They are not intended for use by applications. For application consumption, use the get trained models statistics API. -:::: - +IMPORTANT: CAT APIs are only intended for human consumption using the Kibana +console or command line. 
They are not intended for use by applications. For +application consumption, use the get trained models statistics API. -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-ml-trained-models) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-cat-ml-trained-models) ```ts client.cat.mlTrainedModels({ ... }) ``` +### Arguments [_arguments_cat.ml_trained_models] -### Arguments [_arguments_60] - -* **Request (object):** - - * **`model_id` (Optional, string)**: A unique identifier for the trained model. - * **`allow_no_match` (Optional, boolean)**: Specifies what to do when the request: contains wildcard expressions and there are no models that match; contains the `_all` string or no identifiers and there are no matches; contains wildcard expressions and there are only partial matches. If `true`, the API returns an empty array when there are no matches and the subset of results when there are partial matches. If `false`, the API returns a 404 status code when there are no matches or only partial matches. - * **`bytes` (Optional, Enum("b" | "kb" | "mb" | "gb" | "tb" | "pb"))**: The unit used to display byte values. - * **`h` (Optional, Enum("create_time" | "created_by" | "data_frame_analytics_id" | "description" | "heap_size" | "id" | "ingest.count" | "ingest.current" | "ingest.failed" | "ingest.pipelines" | "ingest.time" | "license" | "operations" | "version") | Enum("create_time" | "created_by" | "data_frame_analytics_id" | "description" | "heap_size" | "id" | "ingest.count" | "ingest.current" | "ingest.failed" | "ingest.pipelines" | "ingest.time" | "license" | "operations" | "version")[])**: A list of column names to display. - * **`s` (Optional, Enum("create_time" | "created_by" | "data_frame_analytics_id" | "description" | "heap_size" | "id" | "ingest.count" | "ingest.current" | "ingest.failed" | "ingest.pipelines" | "ingest.time" | "license" | "operations" | "version") | Enum("create_time" | "created_by" | "data_frame_analytics_id" | "description" | "heap_size" | "id" | "ingest.count" | "ingest.current" | "ingest.failed" | "ingest.pipelines" | "ingest.time" | "license" | "operations" | "version")[])**: A list of column names or aliases used to sort the response. - * **`from` (Optional, number)**: Skips the specified number of transforms. - * **`size` (Optional, number)**: The maximum number of transforms to display. - * **`time` (Optional, Enum("nanos" | "micros" | "ms" | "s" | "m" | "h" | "d"))**: Unit used to display time values. - - - -### nodeattrs [_nodeattrs] +#### Request (object) [_request_cat.ml_trained_models] +- **`model_id` (Optional, string)**: A unique identifier for the trained model. +- **`allow_no_match` (Optional, boolean)**: Specifies what to do when the request: contains wildcard expressions and there are no models that match; contains the `_all` string or no identifiers and there are no matches; contains wildcard expressions and there are only partial matches. +If `true`, the API returns an empty array when there are no matches and the subset of results when there are partial matches. +If `false`, the API returns a 404 status code when there are no matches or only partial matches. +- **`bytes` (Optional, Enum("b" | "kb" | "mb" | "gb" | "tb" | "pb"))**: The unit used to display byte values. 
+- **`h` (Optional, Enum("create_time" | "created_by" | "data_frame_analytics_id" | "description" | "heap_size" | "id" | "ingest.count" | "ingest.current" | "ingest.failed" | "ingest.pipelines" | "ingest.time" | "license" | "operations" | "version") | Enum("create_time" | "created_by" | "data_frame_analytics_id" | "description" | "heap_size" | "id" | "ingest.count" | "ingest.current" | "ingest.failed" | "ingest.pipelines" | "ingest.time" | "license" | "operations" | "version")[])**: A list of column names to display. +- **`s` (Optional, Enum("create_time" | "created_by" | "data_frame_analytics_id" | "description" | "heap_size" | "id" | "ingest.count" | "ingest.current" | "ingest.failed" | "ingest.pipelines" | "ingest.time" | "license" | "operations" | "version") | Enum("create_time" | "created_by" | "data_frame_analytics_id" | "description" | "heap_size" | "id" | "ingest.count" | "ingest.current" | "ingest.failed" | "ingest.pipelines" | "ingest.time" | "license" | "operations" | "version")[])**: A list of column names or aliases used to sort the response. +- **`from` (Optional, number)**: Skips the specified number of transforms. +- **`size` (Optional, number)**: The maximum number of transforms to display. +- **`time` (Optional, Enum("nanos" | "micros" | "ms" | "s" | "m" | "h" | "d"))**: Unit used to display time values. +## client.cat.nodeattrs [_cat.nodeattrs] Get node attribute information. -Get information about custom node attributes. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the nodes info API. +Get information about custom node attributes. +IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the nodes info API. -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-nodeattrs) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-cat-nodeattrs) ```ts client.cat.nodeattrs({ ... }) ``` +### Arguments [_arguments_cat.nodeattrs] -### Arguments [_arguments_61] - -* **Request (object):** - - * **`local` (Optional, boolean)**: If `true`, the request computes the list of selected nodes from the local cluster state. If `false` the list of selected nodes are computed from the cluster state of the master node. In both cases the coordinating node will send requests for further information to each selected node. - * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. - - - -### nodes [_nodes] +#### Request (object) [_request_cat.nodeattrs] +- **`h` (Optional, string | string[])**: List of columns to appear in the response. Supports simple wildcards. +- **`s` (Optional, string | string[])**: List of columns that determine how the table should be sorted. +Sorting defaults to ascending and can be changed by setting `:asc` +or `:desc` as a suffix to the column name. +- **`local` (Optional, boolean)**: If `true`, the request computes the list of selected nodes from the +local cluster state. If `false` the list of selected nodes are computed +from the cluster state of the master node. In both cases the coordinating +node will send requests for further information to each selected node. +- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. 
+## client.cat.nodes [_cat.nodes] Get node information. -Get information about the nodes in a cluster. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the nodes info API. +Get information about the nodes in a cluster. +IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the nodes info API. -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-nodes) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-cat-nodes) ```ts client.cat.nodes({ ... }) ``` +### Arguments [_arguments_cat.nodes] -### Arguments [_arguments_62] - -* **Request (object):** - - * **`bytes` (Optional, Enum("b" | "kb" | "mb" | "gb" | "tb" | "pb"))**: The unit used to display byte values. - * **`full_id` (Optional, boolean | string)**: If `true`, return the full node ID. If `false`, return the shortened node ID. - * **`include_unloaded_segments` (Optional, boolean)**: If true, the response includes information from segments that are not loaded into memory. - * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. - * **`time` (Optional, Enum("nanos" | "micros" | "ms" | "s" | "m" | "h" | "d"))**: Unit used to display time values. - - - -### pending_tasks [_pending_tasks] +#### Request (object) [_request_cat.nodes] +- **`bytes` (Optional, Enum("b" | "kb" | "mb" | "gb" | "tb" | "pb"))**: The unit used to display byte values. +- **`full_id` (Optional, boolean | string)**: If `true`, return the full node ID. If `false`, return the shortened node ID. +- **`include_unloaded_segments` (Optional, boolean)**: If true, the response includes information from segments that are not loaded into memory. +- **`h` (Optional, string | string[])**: List of columns to appear in the response. Supports simple wildcards. +- **`s` (Optional, string | string[])**: List of columns that determine how the table should be sorted. +Sorting defaults to ascending and can be changed by setting `:asc` +or `:desc` as a suffix to the column name. +- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. +- **`time` (Optional, Enum("nanos" | "micros" | "ms" | "s" | "m" | "h" | "d"))**: Unit used to display time values. +## client.cat.pendingTasks [_cat.pending_tasks] Get pending task information. -Get information about cluster-level changes that have not yet taken effect. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the pending cluster tasks API. +Get information about cluster-level changes that have not yet taken effect. +IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the pending cluster tasks API. -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-pending-tasks) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-cat-pending-tasks) ```ts client.cat.pendingTasks({ ... 
}) ``` +### Arguments [_arguments_cat.pending_tasks] -### Arguments [_arguments_63] - -* **Request (object):** - - * **`local` (Optional, boolean)**: If `true`, the request computes the list of selected nodes from the local cluster state. If `false` the list of selected nodes are computed from the cluster state of the master node. In both cases the coordinating node will send requests for further information to each selected node. - * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. - * **`time` (Optional, Enum("nanos" | "micros" | "ms" | "s" | "m" | "h" | "d"))**: Unit used to display time values. - - - -### plugins [_plugins] +#### Request (object) [_request_cat.pending_tasks] +- **`h` (Optional, string | string[])**: List of columns to appear in the response. Supports simple wildcards. +- **`s` (Optional, string | string[])**: List of columns that determine how the table should be sorted. +Sorting defaults to ascending and can be changed by setting `:asc` +or `:desc` as a suffix to the column name. +- **`local` (Optional, boolean)**: If `true`, the request computes the list of selected nodes from the +local cluster state. If `false` the list of selected nodes are computed +from the cluster state of the master node. In both cases the coordinating +node will send requests for further information to each selected node. +- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. +- **`time` (Optional, Enum("nanos" | "micros" | "ms" | "s" | "m" | "h" | "d"))**: Unit used to display time values. +## client.cat.plugins [_cat.plugins] Get plugin information. -Get a list of plugins running on each node of a cluster. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the nodes info API. +Get a list of plugins running on each node of a cluster. +IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the nodes info API. -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-plugins) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-cat-plugins) ```ts client.cat.plugins({ ... }) ``` +### Arguments [_arguments_cat.plugins] -### Arguments [_arguments_64] - -* **Request (object):** - - * **`include_bootstrap` (Optional, boolean)**: Include bootstrap plugins in the response - * **`local` (Optional, boolean)**: If `true`, the request computes the list of selected nodes from the local cluster state. If `false` the list of selected nodes are computed from the cluster state of the master node. In both cases the coordinating node will send requests for further information to each selected node. - * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. - - - -### recovery [_recovery] +#### Request (object) [_request_cat.plugins] +- **`h` (Optional, string | string[])**: List of columns to appear in the response. Supports simple wildcards. +- **`s` (Optional, string | string[])**: List of columns that determine how the table should be sorted. +Sorting defaults to ascending and can be changed by setting `:asc` +or `:desc` as a suffix to the column name. 
+- **`include_bootstrap` (Optional, boolean)**: Include bootstrap plugins in the response +- **`local` (Optional, boolean)**: If `true`, the request computes the list of selected nodes from the +local cluster state. If `false` the list of selected nodes are computed +from the cluster state of the master node. In both cases the coordinating +node will send requests for further information to each selected node. +- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. +## client.cat.recovery [_cat.recovery] Get shard recovery information. -Get information about ongoing and completed shard recoveries. Shard recovery is the process of initializing a shard copy, such as restoring a primary shard from a snapshot or syncing a replica shard from a primary shard. When a shard recovery completes, the recovered shard is available for search and indexing. For data streams, the API returns information about the stream’s backing indices. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the index recovery API. +Get information about ongoing and completed shard recoveries. +Shard recovery is the process of initializing a shard copy, such as restoring a primary shard from a snapshot or syncing a replica shard from a primary shard. When a shard recovery completes, the recovered shard is available for search and indexing. +For data streams, the API returns information about the stream’s backing indices. +IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the index recovery API. -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-recovery) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-cat-recovery) ```ts client.cat.recovery({ ... }) ``` +### Arguments [_arguments_cat.recovery] -### Arguments [_arguments_65] - -* **Request (object):** - - * **`index` (Optional, string | string[])**: A list of data streams, indices, and aliases used to limit the request. Supports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`. - * **`active_only` (Optional, boolean)**: If `true`, the response only includes ongoing shard recoveries. - * **`bytes` (Optional, Enum("b" | "kb" | "mb" | "gb" | "tb" | "pb"))**: The unit used to display byte values. - * **`detailed` (Optional, boolean)**: If `true`, the response includes detailed information about shard recoveries. - * **`time` (Optional, Enum("nanos" | "micros" | "ms" | "s" | "m" | "h" | "d"))**: Unit used to display time values. - - - -### repositories [_repositories] +#### Request (object) [_request_cat.recovery] +- **`index` (Optional, string | string[])**: A list of data streams, indices, and aliases used to limit the request. +Supports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`. +- **`active_only` (Optional, boolean)**: If `true`, the response only includes ongoing shard recoveries. +- **`bytes` (Optional, Enum("b" | "kb" | "mb" | "gb" | "tb" | "pb"))**: The unit used to display byte values. +- **`detailed` (Optional, boolean)**: If `true`, the response includes detailed information about shard recoveries. 
+- **`h` (Optional, string | string[])**: List of columns to appear in the response. Supports simple wildcards. +- **`s` (Optional, string | string[])**: List of columns that determine how the table should be sorted. +Sorting defaults to ascending and can be changed by setting `:asc` +or `:desc` as a suffix to the column name. +- **`time` (Optional, Enum("nanos" | "micros" | "ms" | "s" | "m" | "h" | "d"))**: Unit used to display time values. +## client.cat.repositories [_cat.repositories] Get snapshot repository information. -Get a list of snapshot repositories for a cluster. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the get snapshot repository API. +Get a list of snapshot repositories for a cluster. +IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the get snapshot repository API. -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-repositories) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-cat-repositories) ```ts client.cat.repositories({ ... }) ``` +### Arguments [_arguments_cat.repositories] -### Arguments [_arguments_66] - -* **Request (object):** - - * **`local` (Optional, boolean)**: If `true`, the request computes the list of selected nodes from the local cluster state. If `false` the list of selected nodes are computed from the cluster state of the master node. In both cases the coordinating node will send requests for further information to each selected node. - * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. - - - -### segments [_segments] +#### Request (object) [_request_cat.repositories] +- **`h` (Optional, string | string[])**: List of columns to appear in the response. Supports simple wildcards. +- **`s` (Optional, string | string[])**: List of columns that determine how the table should be sorted. +Sorting defaults to ascending and can be changed by setting `:asc` +or `:desc` as a suffix to the column name. +- **`local` (Optional, boolean)**: If `true`, the request computes the list of selected nodes from the +local cluster state. If `false` the list of selected nodes are computed +from the cluster state of the master node. In both cases the coordinating +node will send requests for further information to each selected node. +- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. +## client.cat.segments [_cat.segments] Get segment information. -Get low-level information about the Lucene segments in index shards. For data streams, the API returns information about the backing indices. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the index segments API. +Get low-level information about the Lucene segments in index shards. +For data streams, the API returns information about the backing indices. +IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the index segments API. 
-[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-segments) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-cat-segments) ```ts client.cat.segments({ ... }) ``` - -### Arguments [_arguments_67] - -* **Request (object):** - - * **`index` (Optional, string | string[])**: A list of data streams, indices, and aliases used to limit the request. Supports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`. - * **`bytes` (Optional, Enum("b" | "kb" | "mb" | "gb" | "tb" | "pb"))**: The unit used to display byte values. - * **`local` (Optional, boolean)**: If `true`, the request computes the list of selected nodes from the local cluster state. If `false` the list of selected nodes are computed from the cluster state of the master node. In both cases the coordinating node will send requests for further information to each selected node. - * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. - - - -### shards [_shards] - +### Arguments [_arguments_cat.segments] + +#### Request (object) [_request_cat.segments] +- **`index` (Optional, string | string[])**: A list of data streams, indices, and aliases used to limit the request. +Supports wildcards (`*`). +To target all data streams and indices, omit this parameter or use `*` or `_all`. +- **`bytes` (Optional, Enum("b" | "kb" | "mb" | "gb" | "tb" | "pb"))**: The unit used to display byte values. +- **`h` (Optional, string | string[])**: List of columns to appear in the response. Supports simple wildcards. +- **`s` (Optional, string | string[])**: List of columns that determine how the table should be sorted. +Sorting defaults to ascending and can be changed by setting `:asc` +or `:desc` as a suffix to the column name. +- **`local` (Optional, boolean)**: If `true`, the request computes the list of selected nodes from the +local cluster state. If `false` the list of selected nodes are computed +from the cluster state of the master node. In both cases the coordinating +node will send requests for further information to each selected node. +- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. + +## client.cat.shards [_cat.shards] Get shard information. -Get information about the shards in a cluster. For data streams, the API returns information about the backing indices. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. +Get information about the shards in a cluster. +For data streams, the API returns information about the backing indices. +IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-shards) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-cat-shards) ```ts client.cat.shards({ ... }) ``` +### Arguments [_arguments_cat.shards] -### Arguments [_arguments_68] - -* **Request (object):** - - * **`index` (Optional, string | string[])**: A list of data streams, indices, and aliases used to limit the request. Supports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`. 
- * **`bytes` (Optional, Enum("b" | "kb" | "mb" | "gb" | "tb" | "pb"))**: The unit used to display byte values. - * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. - * **`time` (Optional, Enum("nanos" | "micros" | "ms" | "s" | "m" | "h" | "d"))**: Unit used to display time values. - - - -### snapshots [_snapshots] +#### Request (object) [_request_cat.shards] +- **`index` (Optional, string | string[])**: A list of data streams, indices, and aliases used to limit the request. +Supports wildcards (`*`). +To target all data streams and indices, omit this parameter or use `*` or `_all`. +- **`bytes` (Optional, Enum("b" | "kb" | "mb" | "gb" | "tb" | "pb"))**: The unit used to display byte values. +- **`h` (Optional, string | string[])**: List of columns to appear in the response. Supports simple wildcards. +- **`s` (Optional, string | string[])**: List of columns that determine how the table should be sorted. +Sorting defaults to ascending and can be changed by setting `:asc` +or `:desc` as a suffix to the column name. +- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. +- **`time` (Optional, Enum("nanos" | "micros" | "ms" | "s" | "m" | "h" | "d"))**: Unit used to display time values. +## client.cat.snapshots [_cat.snapshots] Get snapshot information. -Get information about the snapshots stored in one or more repositories. A snapshot is a backup of an index or running Elasticsearch cluster. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the get snapshot API. +Get information about the snapshots stored in one or more repositories. +A snapshot is a backup of an index or running Elasticsearch cluster. +IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the get snapshot API. -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-snapshots) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-cat-snapshots) ```ts client.cat.snapshots({ ... }) ``` +### Arguments [_arguments_cat.snapshots] -### Arguments [_arguments_69] - -* **Request (object):** - - * **`repository` (Optional, string | string[])**: A list of snapshot repositories used to limit the request. Accepts wildcard expressions. `_all` returns all repositories. If any repository fails during the request, Elasticsearch returns an error. - * **`ignore_unavailable` (Optional, boolean)**: If `true`, the response does not include information from unavailable snapshots. - * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. - * **`time` (Optional, Enum("nanos" | "micros" | "ms" | "s" | "m" | "h" | "d"))**: Unit used to display time values. - - - -### tasks [_tasks] +#### Request (object) [_request_cat.snapshots] +- **`repository` (Optional, string | string[])**: A list of snapshot repositories used to limit the request. +Accepts wildcard expressions. +`_all` returns all repositories. +If any repository fails during the request, Elasticsearch returns an error. +- **`ignore_unavailable` (Optional, boolean)**: If `true`, the response does not include information from unavailable snapshots. 
+- **`h` (Optional, string | string[])**: List of columns to appear in the response. Supports simple wildcards. +- **`s` (Optional, string | string[])**: List of columns that determine how the table should be sorted. +Sorting defaults to ascending and can be changed by setting `:asc` +or `:desc` as a suffix to the column name. +- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. +- **`time` (Optional, Enum("nanos" | "micros" | "ms" | "s" | "m" | "h" | "d"))**: Unit used to display time values. +## client.cat.tasks [_cat.tasks] Get task information. -Get information about tasks currently running in the cluster. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the task management API. +Get information about tasks currently running in the cluster. +IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the task management API. -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-tasks) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-cat-tasks) ```ts client.cat.tasks({ ... }) ``` +### Arguments [_arguments_cat.tasks] -### Arguments [_arguments_70] - -* **Request (object):** - - * **`actions` (Optional, string[])**: The task action names, which are used to limit the response. - * **`detailed` (Optional, boolean)**: If `true`, the response includes detailed information about shard recoveries. - * **`nodes` (Optional, string[])**: Unique node identifiers, which are used to limit the response. - * **`parent_task_id` (Optional, string)**: The parent task identifier, which is used to limit the response. - * **`time` (Optional, Enum("nanos" | "micros" | "ms" | "s" | "m" | "h" | "d"))**: Unit used to display time values. - * **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. - * **`wait_for_completion` (Optional, boolean)**: If `true`, the request blocks until the task has completed. - - - -### templates [_templates] +#### Request (object) [_request_cat.tasks] +- **`actions` (Optional, string[])**: The task action names, which are used to limit the response. +- **`detailed` (Optional, boolean)**: If `true`, the response includes detailed information about shard recoveries. +- **`nodes` (Optional, string[])**: Unique node identifiers, which are used to limit the response. +- **`parent_task_id` (Optional, string)**: The parent task identifier, which is used to limit the response. +- **`h` (Optional, string | string[])**: List of columns to appear in the response. Supports simple wildcards. +- **`s` (Optional, string | string[])**: List of columns that determine how the table should be sorted. +Sorting defaults to ascending and can be changed by setting `:asc` +or `:desc` as a suffix to the column name. +- **`time` (Optional, Enum("nanos" | "micros" | "ms" | "s" | "m" | "h" | "d"))**: Unit used to display time values. +- **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. +If no response is received before the timeout expires, the request fails and returns an error. +- **`wait_for_completion` (Optional, boolean)**: If `true`, the request blocks until the task has completed. 
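To complement the task listing above, a short sketch follows. The action pattern and column names are illustrative, and the client setup mirrors the earlier sketch (placeholder endpoint and credentials); only parameters documented above are used.

```ts
import { Client } from '@elastic/elasticsearch'

// Placeholder connection settings; adjust for your cluster.
const client = new Client({
  node: 'http://localhost:9200/',
  auth: { apiKey: 'REPLACE_WITH_API_KEY' }
})

// List currently running search-related tasks, sorted by longest runtime.
// `s` accepts a `:desc` suffix, as noted in the argument list above.
const tasks = await client.cat.tasks({
  actions: ['*search*'],
  detailed: true,
  h: ['action', 'task_id', 'running_time', 'node'],
  s: ['running_time:desc'],
  time: 'ms'
})
console.log(tasks)
```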
+## client.cat.templates [_cat.templates] Get index template information. -Get information about the index templates in a cluster. You can use index templates to apply index settings and field mappings to new indices at creation. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the get index template API. +Get information about the index templates in a cluster. +You can use index templates to apply index settings and field mappings to new indices at creation. +IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the get index template API. -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-templates) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-cat-templates) ```ts client.cat.templates({ ... }) ``` +### Arguments [_arguments_cat.templates] -### Arguments [_arguments_71] - -* **Request (object):** - - * **`name` (Optional, string)**: The name of the template to return. Accepts wildcard expressions. If omitted, all templates are returned. - * **`local` (Optional, boolean)**: If `true`, the request computes the list of selected nodes from the local cluster state. If `false` the list of selected nodes are computed from the cluster state of the master node. In both cases the coordinating node will send requests for further information to each selected node. - * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. - - - -### thread_pool [_thread_pool] +#### Request (object) [_request_cat.templates] +- **`name` (Optional, string)**: The name of the template to return. +Accepts wildcard expressions. If omitted, all templates are returned. +- **`h` (Optional, string | string[])**: List of columns to appear in the response. Supports simple wildcards. +- **`s` (Optional, string | string[])**: List of columns that determine how the table should be sorted. +Sorting defaults to ascending and can be changed by setting `:asc` +or `:desc` as a suffix to the column name. +- **`local` (Optional, boolean)**: If `true`, the request computes the list of selected nodes from the +local cluster state. If `false` the list of selected nodes are computed +from the cluster state of the master node. In both cases the coordinating +node will send requests for further information to each selected node. +- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. +## client.cat.threadPool [_cat.thread_pool] Get thread pool statistics. -Get thread pool statistics for each node in a cluster. Returned information includes all built-in thread pools and custom thread pools. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the nodes info API. +Get thread pool statistics for each node in a cluster. +Returned information includes all built-in thread pools and custom thread pools. +IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the nodes info API. 
-[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-thread-pool) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-cat-thread-pool) ```ts client.cat.threadPool({ ... }) ``` +### Arguments [_arguments_cat.thread_pool] -### Arguments [_arguments_72] - -* **Request (object):** - - * **`thread_pool_patterns` (Optional, string | string[])**: A list of thread pool names used to limit the request. Accepts wildcard expressions. - * **`time` (Optional, Enum("nanos" | "micros" | "ms" | "s" | "m" | "h" | "d"))**: The unit used to display time values. - * **`local` (Optional, boolean)**: If `true`, the request computes the list of selected nodes from the local cluster state. If `false` the list of selected nodes are computed from the cluster state of the master node. In both cases the coordinating node will send requests for further information to each selected node. - * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. - - - -### transforms [_transforms] +#### Request (object) [_request_cat.thread_pool] +- **`thread_pool_patterns` (Optional, string | string[])**: A list of thread pool names used to limit the request. +Accepts wildcard expressions. +- **`h` (Optional, string | string[])**: List of columns to appear in the response. Supports simple wildcards. +- **`s` (Optional, string | string[])**: List of columns that determine how the table should be sorted. +Sorting defaults to ascending and can be changed by setting `:asc` +or `:desc` as a suffix to the column name. +- **`time` (Optional, Enum("nanos" | "micros" | "ms" | "s" | "m" | "h" | "d"))**: The unit used to display time values. +- **`local` (Optional, boolean)**: If `true`, the request computes the list of selected nodes from the +local cluster state. If `false` the list of selected nodes are computed +from the cluster state of the master node. In both cases the coordinating +node will send requests for further information to each selected node. +- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. +## client.cat.transforms [_cat.transforms] Get transform information. Get configuration and usage information about transforms. -CAT APIs are only intended for human consumption using the Kibana console or command line. They are not intended for use by applications. For application consumption, use the get transform statistics API. +CAT APIs are only intended for human consumption using the Kibana +console or command line. They are not intended for use by applications. For +application consumption, use the get transform statistics API. -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-transforms) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-cat-transforms) ```ts client.cat.transforms({ ... }) ``` +### Arguments [_arguments_cat.transforms] -### Arguments [_arguments_73] - -* **Request (object):** +#### Request (object) [_request_cat.transforms] +- **`transform_id` (Optional, string)**: A transform identifier or a wildcard expression. +If you do not specify one of these options, the API returns information for all transforms. 
+- **`allow_no_match` (Optional, boolean)**: Specifies what to do when the request: contains wildcard expressions and there are no transforms that match; contains the `_all` string or no identifiers and there are no matches; contains wildcard expressions and there are only partial matches. +If `true`, it returns an empty transforms array when there are no matches and the subset of results when there are partial matches. +If `false`, the request returns a 404 status code when there are no matches or only partial matches. +- **`from` (Optional, number)**: Skips the specified number of transforms. +- **`h` (Optional, Enum("changes_last_detection_time" | "checkpoint" | "checkpoint_duration_time_exp_avg" | "checkpoint_progress" | "create_time" | "delete_time" | "description" | "dest_index" | "documents_deleted" | "documents_indexed" | "docs_per_second" | "documents_processed" | "frequency" | "id" | "index_failure" | "index_time" | "index_total" | "indexed_documents_exp_avg" | "last_search_time" | "max_page_search_size" | "pages_processed" | "pipeline" | "processed_documents_exp_avg" | "processing_time" | "reason" | "search_failure" | "search_time" | "search_total" | "source_index" | "state" | "transform_type" | "trigger_count" | "version") | Enum("changes_last_detection_time" | "checkpoint" | "checkpoint_duration_time_exp_avg" | "checkpoint_progress" | "create_time" | "delete_time" | "description" | "dest_index" | "documents_deleted" | "documents_indexed" | "docs_per_second" | "documents_processed" | "frequency" | "id" | "index_failure" | "index_time" | "index_total" | "indexed_documents_exp_avg" | "last_search_time" | "max_page_search_size" | "pages_processed" | "pipeline" | "processed_documents_exp_avg" | "processing_time" | "reason" | "search_failure" | "search_time" | "search_total" | "source_index" | "state" | "transform_type" | "trigger_count" | "version")[])**: List of column names to display. +- **`s` (Optional, Enum("changes_last_detection_time" | "checkpoint" | "checkpoint_duration_time_exp_avg" | "checkpoint_progress" | "create_time" | "delete_time" | "description" | "dest_index" | "documents_deleted" | "documents_indexed" | "docs_per_second" | "documents_processed" | "frequency" | "id" | "index_failure" | "index_time" | "index_total" | "indexed_documents_exp_avg" | "last_search_time" | "max_page_search_size" | "pages_processed" | "pipeline" | "processed_documents_exp_avg" | "processing_time" | "reason" | "search_failure" | "search_time" | "search_total" | "source_index" | "state" | "transform_type" | "trigger_count" | "version") | Enum("changes_last_detection_time" | "checkpoint" | "checkpoint_duration_time_exp_avg" | "checkpoint_progress" | "create_time" | "delete_time" | "description" | "dest_index" | "documents_deleted" | "documents_indexed" | "docs_per_second" | "documents_processed" | "frequency" | "id" | "index_failure" | "index_time" | "index_total" | "indexed_documents_exp_avg" | "last_search_time" | "max_page_search_size" | "pages_processed" | "pipeline" | "processed_documents_exp_avg" | "processing_time" | "reason" | "search_failure" | "search_time" | "search_total" | "source_index" | "state" | "transform_type" | "trigger_count" | "version")[])**: List of column names or column aliases used to sort the response. +- **`time` (Optional, Enum("nanos" | "micros" | "ms" | "s" | "m" | "h" | "d"))**: The unit used to display time values. +- **`size` (Optional, number)**: The maximum number of transforms to obtain. 
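A brief sketch of the transforms listing follows. The transform ID pattern is hypothetical, the selected columns come from the enum documented above, and the client setup is the same placeholder as in the earlier sketches.

```ts
import { Client } from '@elastic/elasticsearch'

// Placeholder connection settings; adjust for your cluster.
const client = new Client({
  node: 'http://localhost:9200/',
  auth: { apiKey: 'REPLACE_WITH_API_KEY' }
})

// Show the state and progress of transforms matching a wildcard pattern.
// `allow_no_match: true` returns an empty array instead of a 404 when nothing matches.
const transforms = await client.cat.transforms({
  transform_id: 'ecommerce-*', // hypothetical transform name pattern
  allow_no_match: true,
  h: ['id', 'state', 'documents_processed', 'checkpoint'],
  s: ['id'],
  size: 100
})
console.log(transforms)
```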
- * **`transform_id` (Optional, string)**: A transform identifier or a wildcard expression. If you do not specify one of these options, the API returns information for all transforms. - * **`allow_no_match` (Optional, boolean)**: Specifies what to do when the request: contains wildcard expressions and there are no transforms that match; contains the `_all` string or no identifiers and there are no matches; contains wildcard expressions and there are only partial matches. If `true`, it returns an empty transforms array when there are no matches and the subset of results when there are partial matches. If `false`, the request returns a 404 status code when there are no matches or only partial matches. - * **`from` (Optional, number)**: Skips the specified number of transforms. - * **`h` (Optional, Enum("changes_last_detection_time" | "checkpoint" | "checkpoint_duration_time_exp_avg" | "checkpoint_progress" | "create_time" | "delete_time" | "description" | "dest_index" | "documents_deleted" | "documents_indexed" | "docs_per_second" | "documents_processed" | "frequency" | "id" | "index_failure" | "index_time" | "index_total" | "indexed_documents_exp_avg" | "last_search_time" | "max_page_search_size" | "pages_processed" | "pipeline" | "processed_documents_exp_avg" | "processing_time" | "reason" | "search_failure" | "search_time" | "search_total" | "source_index" | "state" | "transform_type" | "trigger_count" | "version") | Enum("changes_last_detection_time" | "checkpoint" | "checkpoint_duration_time_exp_avg" | "checkpoint_progress" | "create_time" | "delete_time" | "description" | "dest_index" | "documents_deleted" | "documents_indexed" | "docs_per_second" | "documents_processed" | "frequency" | "id" | "index_failure" | "index_time" | "index_total" | "indexed_documents_exp_avg" | "last_search_time" | "max_page_search_size" | "pages_processed" | "pipeline" | "processed_documents_exp_avg" | "processing_time" | "reason" | "search_failure" | "search_time" | "search_total" | "source_index" | "state" | "transform_type" | "trigger_count" | "version")[])**: List of column names to display. - * **`s` (Optional, Enum("changes_last_detection_time" | "checkpoint" | "checkpoint_duration_time_exp_avg" | "checkpoint_progress" | "create_time" | "delete_time" | "description" | "dest_index" | "documents_deleted" | "documents_indexed" | "docs_per_second" | "documents_processed" | "frequency" | "id" | "index_failure" | "index_time" | "index_total" | "indexed_documents_exp_avg" | "last_search_time" | "max_page_search_size" | "pages_processed" | "pipeline" | "processed_documents_exp_avg" | "processing_time" | "reason" | "search_failure" | "search_time" | "search_total" | "source_index" | "state" | "transform_type" | "trigger_count" | "version") | Enum("changes_last_detection_time" | "checkpoint" | "checkpoint_duration_time_exp_avg" | "checkpoint_progress" | "create_time" | "delete_time" | "description" | "dest_index" | "documents_deleted" | "documents_indexed" | "docs_per_second" | "documents_processed" | "frequency" | "id" | "index_failure" | "index_time" | "index_total" | "indexed_documents_exp_avg" | "last_search_time" | "max_page_search_size" | "pages_processed" | "pipeline" | "processed_documents_exp_avg" | "processing_time" | "reason" | "search_failure" | "search_time" | "search_total" | "source_index" | "state" | "transform_type" | "trigger_count" | "version")[])**: List of column names or column aliases used to sort the response. 
- * **`time` (Optional, Enum("nanos" | "micros" | "ms" | "s" | "m" | "h" | "d"))**: The unit used to display time values. - * **`size` (Optional, number)**: The maximum number of transforms to obtain. +## client.ccr.deleteAutoFollowPattern [_ccr.delete_auto_follow_pattern] +Delete auto-follow patterns. +Delete a collection of cross-cluster replication auto-follow patterns. +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-ccr-delete-auto-follow-pattern) -## ccr [_ccr] +```ts +client.ccr.deleteAutoFollowPattern({ name }) +``` +### Arguments [_arguments_ccr.delete_auto_follow_pattern] -### delete_auto_follow_pattern [_delete_auto_follow_pattern] +#### Request (object) [_request_ccr.delete_auto_follow_pattern] +- **`name` (string)**: The auto-follow pattern collection to delete. +- **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for a connection to the master node. +If the master node is not available before the timeout expires, the request fails and returns an error. +It can also be set to `-1` to indicate that the request should never timeout. -Delete auto-follow patterns. Delete a collection of cross-cluster replication auto-follow patterns. +## client.ccr.follow [_ccr.follow] +Create a follower. +Create a cross-cluster replication follower index that follows a specific leader index. +When the API returns, the follower index exists and cross-cluster replication starts replicating operations from the leader index to the follower index. -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-delete-auto-follow-pattern) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-ccr-follow) ```ts -client.ccr.deleteAutoFollowPattern({ name }) +client.ccr.follow({ index, leader_index, remote_cluster }) ``` +### Arguments [_arguments_ccr.follow] + +#### Request (object) [_request_ccr.follow] +- **`index` (string)**: The name of the follower index. +- **`leader_index` (string)**: The name of the index in the leader cluster to follow. +- **`remote_cluster` (string)**: The remote cluster containing the leader index. +- **`data_stream_name` (Optional, string)**: If the leader index is part of a data stream, the name to which the local data stream for the followed index should be renamed. +- **`max_outstanding_read_requests` (Optional, number)**: The maximum number of outstanding reads requests from the remote cluster. +- **`max_outstanding_write_requests` (Optional, number)**: The maximum number of outstanding write requests on the follower. +- **`max_read_request_operation_count` (Optional, number)**: The maximum number of operations to pull per read from the remote cluster. +- **`max_read_request_size` (Optional, number | string)**: The maximum size in bytes of per read of a batch of operations pulled from the remote cluster. +- **`max_retry_delay` (Optional, string | -1 | 0)**: The maximum time to wait before retrying an operation that failed exceptionally. An exponential backoff strategy is employed when +retrying. +- **`max_write_buffer_count` (Optional, number)**: The maximum number of operations that can be queued for writing. When this limit is reached, reads from the remote cluster will be +deferred until the number of queued operations goes below the limit. +- **`max_write_buffer_size` (Optional, number | string)**: The maximum total bytes of operations that can be queued for writing. 
When this limit is reached, reads from the remote cluster will +be deferred until the total bytes of queued operations goes below the limit. +- **`max_write_request_operation_count` (Optional, number)**: The maximum number of operations per bulk write request executed on the follower. +- **`max_write_request_size` (Optional, number | string)**: The maximum total bytes of operations per bulk write request executed on the follower. +- **`read_poll_timeout` (Optional, string | -1 | 0)**: The maximum time to wait for new operations on the remote cluster when the follower index is synchronized with the leader index. +When the timeout has elapsed, the poll for operations will return to the follower so that it can update some statistics. +Then the follower will immediately attempt to read from the leader again. +- **`settings` (Optional, { index, mode, routing_path, soft_deletes, sort, number_of_shards, number_of_replicas, number_of_routing_shards, check_on_startup, codec, routing_partition_size, load_fixed_bitset_filters_eagerly, hidden, auto_expand_replicas, merge, search, refresh_interval, max_result_window, max_inner_result_window, max_rescore_window, max_docvalue_fields_search, max_script_fields, max_ngram_diff, max_shingle_diff, blocks, max_refresh_listeners, analyze, highlight, max_terms_count, max_regex_length, routing, gc_deletes, default_pipeline, final_pipeline, lifecycle, provided_name, creation_date, creation_date_string, uuid, version, verified_before_close, format, max_slices_per_scroll, translog, query_string, priority, top_metrics_max_size, analysis, settings, time_series, queries, similarity, mapping, indexing.slowlog, indexing_pressure, store })**: Settings to override from the leader index. +- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. +- **`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))**: Specifies the number of shards to wait on being active before responding. This defaults to waiting on none of the shards to be +active. +A shard must be restored from the leader index before being active. Restoring a follower shard requires transferring all the +remote Lucene segment files to the follower index. + +## client.ccr.followInfo [_ccr.follow_info] +Get follower information. + +Get information about all cross-cluster replication follower indices. +For example, the results include follower index names, leader index names, replication options, and whether the follower indices are active or paused. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-ccr-follow-info) -### Arguments [_arguments_74] - -* **Request (object):** - - * **`name` (string)**: The name of the auto follow pattern. - * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. +```ts +client.ccr.followInfo({ index }) +``` +### Arguments [_arguments_ccr.follow_info] +#### Request (object) [_request_ccr.follow_info] +- **`index` (string | string[])**: A comma-delimited list of follower index patterns. +- **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for a connection to the master node. +If the master node is not available before the timeout expires, the request fails and returns an error. +It can also be set to `-1` to indicate that the request should never timeout. -### follow [_follow] +## client.ccr.followStats [_ccr.follow_stats] +Get follower stats. -Create a follower. 
Create a cross-cluster replication follower index that follows a specific leader index. When the API returns, the follower index exists and cross-cluster replication starts replicating operations from the leader index to the follower index. +Get cross-cluster replication follower stats. +The API returns shard-level stats about the "following tasks" associated with each shard for the specified indices. -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-follow) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-ccr-follow-stats) ```ts -client.ccr.follow({ index, leader_index, remote_cluster }) +client.ccr.followStats({ index }) ``` +### Arguments [_arguments_ccr.follow_stats] -### Arguments [_arguments_75] +#### Request (object) [_request_ccr.follow_stats] +- **`index` (string | string[])**: A comma-delimited list of index patterns. +- **`timeout` (Optional, string | -1 | 0)**: The period to wait for a response. +If no response is received before the timeout expires, the request fails and returns an error. -* **Request (object):** +## client.ccr.forgetFollower [_ccr.forget_follower] +Forget a follower. +Remove the cross-cluster replication follower retention leases from the leader. - * **`index` (string)**: The name of the follower index. - * **`leader_index` (string)**: The name of the index in the leader cluster to follow. - * **`remote_cluster` (string)**: The remote cluster containing the leader index. - * **`data_stream_name` (Optional, string)**: If the leader index is part of a data stream, the name to which the local data stream for the followed index should be renamed. - * **`max_outstanding_read_requests` (Optional, number)**: The maximum number of outstanding reads requests from the remote cluster. - * **`max_outstanding_write_requests` (Optional, number)**: The maximum number of outstanding write requests on the follower. - * **`max_read_request_operation_count` (Optional, number)**: The maximum number of operations to pull per read from the remote cluster. - * **`max_read_request_size` (Optional, number | string)**: The maximum size in bytes of per read of a batch of operations pulled from the remote cluster. - * **`max_retry_delay` (Optional, string | -1 | 0)**: The maximum time to wait before retrying an operation that failed exceptionally. An exponential backoff strategy is employed when retrying. - * **`max_write_buffer_count` (Optional, number)**: The maximum number of operations that can be queued for writing. When this limit is reached, reads from the remote cluster will be deferred until the number of queued operations goes below the limit. - * **`max_write_buffer_size` (Optional, number | string)**: The maximum total bytes of operations that can be queued for writing. When this limit is reached, reads from the remote cluster will be deferred until the total bytes of queued operations goes below the limit. - * **`max_write_request_operation_count` (Optional, number)**: The maximum number of operations per bulk write request executed on the follower. - * **`max_write_request_size` (Optional, number | string)**: The maximum total bytes of operations per bulk write request executed on the follower. - * **`read_poll_timeout` (Optional, string | -1 | 0)**: The maximum time to wait for new operations on the remote cluster when the follower index is synchronized with the leader index. 
When the timeout has elapsed, the poll for operations will return to the follower so that it can update some statistics. Then the follower will immediately attempt to read from the leader again. - * **`settings` (Optional, { index, mode, routing_path, soft_deletes, sort, number_of_shards, number_of_replicas, number_of_routing_shards, check_on_startup, codec, routing_partition_size, load_fixed_bitset_filters_eagerly, hidden, auto_expand_replicas, merge, search, refresh_interval, max_result_window, max_inner_result_window, max_rescore_window, max_docvalue_fields_search, max_script_fields, max_ngram_diff, max_shingle_diff, blocks, max_refresh_listeners, analyze, highlight, max_terms_count, max_regex_length, routing, gc_deletes, default_pipeline, final_pipeline, lifecycle, provided_name, creation_date, creation_date_string, uuid, version, verified_before_close, format, max_slices_per_scroll, translog, query_string, priority, top_metrics_max_size, analysis, settings, time_series, queries, similarity, mapping, indexing.slowlog, indexing_pressure, store })**: Settings to override from the leader index. - * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. - * **`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))**: Specifies the number of shards to wait on being active before responding. This defaults to waiting on none of the shards to be active. A shard must be restored from the leader index before being active. Restoring a follower shard requires transferring all the remote Lucene segment files to the follower index. +A following index takes out retention leases on its leader index. +These leases are used to increase the likelihood that the shards of the leader index retain the history of operations that the shards of the following index need to run replication. +When a follower index is converted to a regular index by the unfollow API (either by directly calling the API or by index lifecycle management tasks), these leases are removed. +However, removal of the leases can fail, for example when the remote cluster containing the leader index is unavailable. +While the leases will eventually expire on their own, their extended existence can cause the leader index to hold more history than necessary and prevent index lifecycle management from performing some operations on the leader index. +This API exists to enable manually removing the leases when the unfollow API is unable to do so. +NOTE: This API does not stop replication by a following index. If you use this API with a follower index that is still actively following, the following index will add back retention leases on the leader. +The only purpose of this API is to handle the case of failure to remove the following retention leases after the unfollow API is invoked. - -### follow_info [_follow_info] - -Get follower information. Get information about all cross-cluster replication follower indices. For example, the results include follower index names, leader index names, replication options, and whether the follower indices are active or paused. 
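For illustration only (this is an editorial sketch, not part of the generated reference), the follow, follow info, and follow stats APIs documented above can be combined with the client as shown below. The connection details, the `leader` remote cluster, and the index names are placeholder assumptions:

```ts
import { Client } from '@elastic/elasticsearch'

// Placeholder connection details; adjust for your deployment.
const client = new Client({
  node: 'https://localhost:9200',
  auth: { apiKey: 'your-api-key' }
})

// Create a follower index that replicates `leader-index` from the
// remote cluster registered as `leader` (both names are placeholders).
await client.ccr.follow({
  index: 'follower-index',
  leader_index: 'leader-index',
  remote_cluster: 'leader',
  wait_for_active_shards: 1
})

// Inspect the follower: index names, leader indices, and replication options...
const info = await client.ccr.followInfo({ index: 'follower-index' })
// ...and shard-level stats for the following tasks.
const stats = await client.ccr.followStats({ index: 'follower-index' })
console.log(info.follower_indices, stats.indices)
```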
- -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-follow-info) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-ccr-forget-follower) ```ts -client.ccr.followInfo({ index }) +client.ccr.forgetFollower({ index }) ``` +### Arguments [_arguments_ccr.forget_follower] -### Arguments [_arguments_76] +#### Request (object) [_request_ccr.forget_follower] +- **`index` (string)**: the name of the leader index for which specified follower retention leases should be removed +- **`follower_cluster` (Optional, string)** +- **`follower_index` (Optional, string)** +- **`follower_index_uuid` (Optional, string)** +- **`leader_remote_cluster` (Optional, string)** +- **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. -* **Request (object):** +## client.ccr.getAutoFollowPattern [_ccr.get_auto_follow_pattern] +Get auto-follow patterns. - * **`index` (string | string[])**: A list of index patterns; use `_all` to perform the operation on all indices - * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. +Get cross-cluster replication auto-follow patterns. +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-ccr-get-auto-follow-pattern-1) +```ts +client.ccr.getAutoFollowPattern({ ... }) +``` -### follow_stats [_follow_stats] +### Arguments [_arguments_ccr.get_auto_follow_pattern] -Get follower stats. Get cross-cluster replication follower stats. The API returns shard-level stats about the "following tasks" associated with each shard for the specified indices. +#### Request (object) [_request_ccr.get_auto_follow_pattern] +- **`name` (Optional, string)**: The auto-follow pattern collection that you want to retrieve. +If you do not specify a name, the API returns information for all collections. +- **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for a connection to the master node. +If the master node is not available before the timeout expires, the request fails and returns an error. +It can also be set to `-1` to indicate that the request should never timeout. -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-follow-stats) +## client.ccr.pauseAutoFollowPattern [_ccr.pause_auto_follow_pattern] +Pause an auto-follow pattern. -```ts -client.ccr.followStats({ index }) -``` - - -### Arguments [_arguments_77] - -* **Request (object):** - - * **`index` (string | string[])**: A list of index patterns; use `_all` to perform the operation on all indices - * **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. - - - -### forget_follower [_forget_follower] - -Forget a follower. Remove the cross-cluster replication follower retention leases from the leader. - -A following index takes out retention leases on its leader index. These leases are used to increase the likelihood that the shards of the leader index retain the history of operations that the shards of the following index need to run replication. When a follower index is converted to a regular index by the unfollow API (either by directly calling the API or by index lifecycle management tasks), these leases are removed. 
However, removal of the leases can fail, for example when the remote cluster containing the leader index is unavailable. While the leases will eventually expire on their own, their extended existence can cause the leader index to hold more history than necessary and prevent index lifecycle management from performing some operations on the leader index. This API exists to enable manually removing the leases when the unfollow API is unable to do so. - -::::{note} -This API does not stop replication by a following index. If you use this API with a follower index that is still actively following, the following index will add back retention leases on the leader. The only purpose of this API is to handle the case of failure to remove the following retention leases after the unfollow API is invoked. -:::: - - -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-forget-follower) - -```ts -client.ccr.forgetFollower({ index }) -``` - - -### Arguments [_arguments_78] - -* **Request (object):** - - * **`index` (string)**: the name of the leader index for which specified follower retention leases should be removed - * **`follower_cluster` (Optional, string)** - * **`follower_index` (Optional, string)** - * **`follower_index_uuid` (Optional, string)** - * **`leader_remote_cluster` (Optional, string)** - * **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. - - - -### get_auto_follow_pattern [_get_auto_follow_pattern] - -Get auto-follow patterns. Get cross-cluster replication auto-follow patterns. - -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-get-auto-follow-pattern-1) - -```ts -client.ccr.getAutoFollowPattern({ ... }) -``` +Pause a cross-cluster replication auto-follow pattern. +When the API returns, the auto-follow pattern is inactive. +New indices that are created on the remote cluster and match the auto-follow patterns are ignored. +You can resume auto-following with the resume auto-follow pattern API. +When it resumes, the auto-follow pattern is active again and automatically configures follower indices for newly created indices on the remote cluster that match its patterns. +Remote indices that were created while the pattern was paused will also be followed, unless they have been deleted or closed in the interim. -### Arguments [_arguments_79] - -* **Request (object):** - - * **`name` (Optional, string)**: Specifies the auto-follow pattern collection that you want to retrieve. If you do not specify a name, the API returns information for all collections. - * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. - - - -### pause_auto_follow_pattern [_pause_auto_follow_pattern] - -Pause an auto-follow pattern. Pause a cross-cluster replication auto-follow pattern. When the API returns, the auto-follow pattern is inactive. New indices that are created on the remote cluster and match the auto-follow patterns are ignored. - -You can resume auto-following with the resume auto-follow pattern API. When it resumes, the auto-follow pattern is active again and automatically configures follower indices for newly created indices on the remote cluster that match its patterns. Remote indices that were created while the pattern was paused will also be followed, unless they have been deleted or closed in the interim. 
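As an illustrative sketch (assuming a `client` instance configured as above and an existing auto-follow pattern named `my-pattern`, both placeholders), pausing, inspecting, and resuming a pattern could look like this:

```ts
// Stop auto-following new remote indices that match the pattern.
await client.ccr.pauseAutoFollowPattern({ name: 'my-pattern' })

// Retrieve the pattern; omitting `name` returns all collections.
const { patterns } = await client.ccr.getAutoFollowPattern({ name: 'my-pattern' })
console.log(patterns)

// Resume the pattern. Matching remote indices created while it was paused
// are also followed, unless they were deleted or closed in the interim.
await client.ccr.resumeAutoFollowPattern({ name: 'my-pattern' })
```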
- -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-pause-auto-follow-pattern) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-ccr-pause-auto-follow-pattern) ```ts client.ccr.pauseAutoFollowPattern({ name }) ``` +### Arguments [_arguments_ccr.pause_auto_follow_pattern] -### Arguments [_arguments_80] +#### Request (object) [_request_ccr.pause_auto_follow_pattern] +- **`name` (string)**: The name of the auto-follow pattern to pause. +- **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for a connection to the master node. +If the master node is not available before the timeout expires, the request fails and returns an error. +It can also be set to `-1` to indicate that the request should never timeout. -* **Request (object):** +## client.ccr.pauseFollow [_ccr.pause_follow] +Pause a follower. - * **`name` (string)**: The name of the auto follow pattern that should pause discovering new indices to follow. - * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. +Pause a cross-cluster replication follower index. +The follower index will not fetch any additional operations from the leader index. +You can resume following with the resume follower API. +You can pause and resume a follower index to change the configuration of the following task. - - -### pause_follow [_pause_follow] - -Pause a follower. Pause a cross-cluster replication follower index. The follower index will not fetch any additional operations from the leader index. You can resume following with the resume follower API. You can pause and resume a follower index to change the configuration of the following task. - -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-pause-follow) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-ccr-pause-follow) ```ts client.ccr.pauseFollow({ index }) ``` +### Arguments [_arguments_ccr.pause_follow] -### Arguments [_arguments_81] - -* **Request (object):** - - * **`index` (string)**: The name of the follower index that should pause following its leader index. - * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. - - +#### Request (object) [_request_ccr.pause_follow] +- **`index` (string)**: The name of the follower index. +- **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for a connection to the master node. +If the master node is not available before the timeout expires, the request fails and returns an error. +It can also be set to `-1` to indicate that the request should never timeout. -### put_auto_follow_pattern [_put_auto_follow_pattern] +## client.ccr.putAutoFollowPattern [_ccr.put_auto_follow_pattern] +Create or update auto-follow patterns. +Create a collection of cross-cluster replication auto-follow patterns for a remote cluster. +Newly created indices on the remote cluster that match any of the patterns are automatically configured as follower indices. +Indices on the remote cluster that were created before the auto-follow pattern was created will not be auto-followed even if they match the pattern. -Create or update auto-follow patterns. Create a collection of cross-cluster replication auto-follow patterns for a remote cluster. Newly created indices on the remote cluster that match any of the patterns are automatically configured as follower indices. 
Indices on the remote cluster that were created before the auto-follow pattern was created will not be auto-followed even if they match the pattern. +This API can also be used to update auto-follow patterns. +NOTE: Follower indices that were configured automatically before updating an auto-follow pattern will remain unchanged even if they do not match against the new patterns. -This API can also be used to update auto-follow patterns. NOTE: Follower indices that were configured automatically before updating an auto-follow pattern will remain unchanged even if they do not match against the new patterns. - -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-put-auto-follow-pattern) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-ccr-put-auto-follow-pattern) ```ts client.ccr.putAutoFollowPattern({ name, remote_cluster }) ``` +### Arguments [_arguments_ccr.put_auto_follow_pattern] -### Arguments [_arguments_82] - -* **Request (object):** - - * **`name` (string)**: The name of the collection of auto-follow patterns. - * **`remote_cluster` (string)**: The remote cluster containing the leader indices to match against. - * **`follow_index_pattern` (Optional, string)**: The name of follower index. The template `{{leader_index}}` can be used to derive the name of the follower index from the name of the leader index. When following a data stream, use `{{leader_index}}`; CCR does not support changes to the names of a follower data stream’s backing indices. - * **`leader_index_patterns` (Optional, string[])**: An array of simple index patterns to match against indices in the remote cluster specified by the remote_cluster field. - * **`leader_index_exclusion_patterns` (Optional, string[])**: An array of simple index patterns that can be used to exclude indices from being auto-followed. Indices in the remote cluster whose names are matching one or more leader_index_patterns and one or more leader_index_exclusion_patterns won’t be followed. - * **`max_outstanding_read_requests` (Optional, number)**: The maximum number of outstanding reads requests from the remote cluster. - * **`settings` (Optional, Record)**: Settings to override from the leader index. Note that certain settings can not be overrode (e.g., index.number_of_shards). - * **`max_outstanding_write_requests` (Optional, number)**: The maximum number of outstanding reads requests from the remote cluster. - * **`read_poll_timeout` (Optional, string | -1 | 0)**: The maximum time to wait for new operations on the remote cluster when the follower index is synchronized with the leader index. When the timeout has elapsed, the poll for operations will return to the follower so that it can update some statistics. Then the follower will immediately attempt to read from the leader again. - * **`max_read_request_operation_count` (Optional, number)**: The maximum number of operations to pull per read from the remote cluster. - * **`max_read_request_size` (Optional, number | string)**: The maximum size in bytes of per read of a batch of operations pulled from the remote cluster. - * **`max_retry_delay` (Optional, string | -1 | 0)**: The maximum time to wait before retrying an operation that failed exceptionally. An exponential backoff strategy is employed when retrying. - * **`max_write_buffer_count` (Optional, number)**: The maximum number of operations that can be queued for writing. 
When this limit is reached, reads from the remote cluster will be deferred until the number of queued operations goes below the limit. - * **`max_write_buffer_size` (Optional, number | string)**: The maximum total bytes of operations that can be queued for writing. When this limit is reached, reads from the remote cluster will be deferred until the total bytes of queued operations goes below the limit. - * **`max_write_request_operation_count` (Optional, number)**: The maximum number of operations per bulk write request executed on the follower. - * **`max_write_request_size` (Optional, number | string)**: The maximum total bytes of operations per bulk write request executed on the follower. - * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. - +#### Request (object) [_request_ccr.put_auto_follow_pattern] +- **`name` (string)**: The name of the collection of auto-follow patterns. +- **`remote_cluster` (string)**: The remote cluster containing the leader indices to match against. +- **`follow_index_pattern` (Optional, string)**: The name of the follower index. The template `{{leader_index}}` can be used to derive the name of the follower index from the name of the leader index. When following a data stream, use `{{leader_index}}`; CCR does not support changes to the names of a follower data stream’s backing indices. +- **`leader_index_patterns` (Optional, string[])**: An array of simple index patterns to match against indices in the remote cluster specified by the remote_cluster field. +- **`leader_index_exclusion_patterns` (Optional, string[])**: An array of simple index patterns that can be used to exclude indices from being auto-followed. Indices in the remote cluster whose names match one or more leader_index_patterns and one or more leader_index_exclusion_patterns won’t be followed. +- **`max_outstanding_read_requests` (Optional, number)**: The maximum number of outstanding read requests from the remote cluster. +- **`settings` (Optional, Record)**: Settings to override from the leader index. Note that certain settings cannot be overridden (e.g., index.number_of_shards). +- **`max_outstanding_write_requests` (Optional, number)**: The maximum number of outstanding write requests on the follower. +- **`read_poll_timeout` (Optional, string | -1 | 0)**: The maximum time to wait for new operations on the remote cluster when the follower index is synchronized with the leader index. When the timeout has elapsed, the poll for operations will return to the follower so that it can update some statistics. Then the follower will immediately attempt to read from the leader again. +- **`max_read_request_operation_count` (Optional, number)**: The maximum number of operations to pull per read from the remote cluster. +- **`max_read_request_size` (Optional, number | string)**: The maximum size in bytes per read of a batch of operations pulled from the remote cluster. +- **`max_retry_delay` (Optional, string | -1 | 0)**: The maximum time to wait before retrying an operation that failed exceptionally. An exponential backoff strategy is employed when retrying. +- **`max_write_buffer_count` (Optional, number)**: The maximum number of operations that can be queued for writing. When this limit is reached, reads from the remote cluster will be deferred until the number of queued operations goes below the limit. +- **`max_write_buffer_size` (Optional, number | string)**: The maximum total bytes of operations that can be queued for writing.
When this limit is reached, reads from the remote cluster will be deferred until the total bytes of queued operations goes below the limit. +- **`max_write_request_operation_count` (Optional, number)**: The maximum number of operations per bulk write request executed on the follower. +- **`max_write_request_size` (Optional, number | string)**: The maximum total bytes of operations per bulk write request executed on the follower. +- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. +## client.ccr.resumeAutoFollowPattern [_ccr.resume_auto_follow_pattern] +Resume an auto-follow pattern. -### resume_auto_follow_pattern [_resume_auto_follow_pattern] +Resume a cross-cluster replication auto-follow pattern that was paused. +The auto-follow pattern will resume configuring following indices for newly created indices that match its patterns on the remote cluster. +Remote indices created while the pattern was paused will also be followed unless they have been deleted or closed in the interim. -Resume an auto-follow pattern. Resume a cross-cluster replication auto-follow pattern that was paused. The auto-follow pattern will resume configuring following indices for newly created indices that match its patterns on the remote cluster. Remote indices created while the pattern was paused will also be followed unless they have been deleted or closed in the interim. - -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-resume-auto-follow-pattern) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-ccr-resume-auto-follow-pattern) ```ts client.ccr.resumeAutoFollowPattern({ name }) ``` +### Arguments [_arguments_ccr.resume_auto_follow_pattern] -### Arguments [_arguments_83] - -* **Request (object):** - - * **`name` (string)**: The name of the auto follow pattern to resume discovering new indices to follow. - * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. - +#### Request (object) [_request_ccr.resume_auto_follow_pattern] +- **`name` (string)**: The name of the auto-follow pattern to resume. +- **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for a connection to the master node. +If the master node is not available before the timeout expires, the request fails and returns an error. +It can also be set to `-1` to indicate that the request should never timeout. +## client.ccr.resumeFollow [_ccr.resume_follow] +Resume a follower. +Resume a cross-cluster replication follower index that was paused. +The follower index could have been paused with the pause follower API. +Alternatively it could be paused due to replication that cannot be retried due to failures during following tasks. +When this API returns, the follower index will resume fetching operations from the leader index. -### resume_follow [_resume_follow] - -Resume a follower. Resume a cross-cluster replication follower index that was paused. The follower index could have been paused with the pause follower API. Alternatively it could be paused due to replication that cannot be retried due to failures during following tasks. When this API returns, the follower index will resume fetching operations from the leader index. 
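For illustration (again with a placeholder `client` and index name), pausing a follower to change its following-task configuration and then resuming it might look like:

```ts
// Stop fetching operations from the leader index.
await client.ccr.pauseFollow({ index: 'follower-index' })

// Resume following with an adjusted read poll timeout (placeholder value).
await client.ccr.resumeFollow({
  index: 'follower-index',
  read_poll_timeout: '30s'
})
```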
- -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-resume-follow) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-ccr-resume-follow) ```ts client.ccr.resumeFollow({ index }) ``` +### Arguments [_arguments_ccr.resume_follow] -### Arguments [_arguments_84] - -* **Request (object):** - - * **`index` (string)**: The name of the follow index to resume following. - * **`max_outstanding_read_requests` (Optional, number)** - * **`max_outstanding_write_requests` (Optional, number)** - * **`max_read_request_operation_count` (Optional, number)** - * **`max_read_request_size` (Optional, string)** - * **`max_retry_delay` (Optional, string | -1 | 0)** - * **`max_write_buffer_count` (Optional, number)** - * **`max_write_buffer_size` (Optional, string)** - * **`max_write_request_operation_count` (Optional, number)** - * **`max_write_request_size` (Optional, string)** - * **`read_poll_timeout` (Optional, string | -1 | 0)** - * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. +#### Request (object) [_request_ccr.resume_follow] +- **`index` (string)**: The name of the follow index to resume following. +- **`max_outstanding_read_requests` (Optional, number)** +- **`max_outstanding_write_requests` (Optional, number)** +- **`max_read_request_operation_count` (Optional, number)** +- **`max_read_request_size` (Optional, string)** +- **`max_retry_delay` (Optional, string | -1 | 0)** +- **`max_write_buffer_count` (Optional, number)** +- **`max_write_buffer_size` (Optional, string)** +- **`max_write_request_operation_count` (Optional, number)** +- **`max_write_request_size` (Optional, string)** +- **`read_poll_timeout` (Optional, string | -1 | 0)** +- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. +## client.ccr.stats [_ccr.stats] +Get cross-cluster replication stats. +This API returns stats about auto-following and the same shard-level stats as the get follower stats API. -### stats [_stats] - -Get cross-cluster replication stats. This API returns stats about auto-following and the same shard-level stats as the get follower stats API. - -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-stats) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-ccr-stats) ```ts client.ccr.stats({ ... }) ``` +### Arguments [_arguments_ccr.stats] -### Arguments [_arguments_85] - -* **Request (object):** - - * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. - * **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. +#### Request (object) [_request_ccr.stats] +- **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for a connection to the master node. +If the master node is not available before the timeout expires, the request fails and returns an error. +It can also be set to `-1` to indicate that the request should never timeout. +- **`timeout` (Optional, string | -1 | 0)**: The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. +## client.ccr.unfollow [_ccr.unfollow] +Unfollow an index. +Convert a cross-cluster replication follower index to a regular index. 
+The API stops the following task associated with a follower index and removes index metadata and settings associated with cross-cluster replication. +The follower index must be paused and closed before you call the unfollow API. -### unfollow [_unfollow] +> info +> Currently cross-cluster replication does not support converting an existing regular index to a follower index. Converting a follower index to a regular index is an irreversible operation. -Unfollow an index. Convert a cross-cluster replication follower index to a regular index. The API stops the following task associated with a follower index and removes index metadata and settings associated with cross-cluster replication. The follower index must be paused and closed before you call the unfollow API. - -::::{note} -Currently cross-cluster replication does not support converting an existing regular index to a follower index. Converting a follower index to a regular index is an irreversible operation. -:::: - - -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-unfollow) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-ccr-unfollow) ```ts client.ccr.unfollow({ index }) ``` +### Arguments [_arguments_ccr.unfollow] -### Arguments [_arguments_86] - -* **Request (object):** - - * **`index` (string)**: The name of the follower index that should be turned into a regular index. - * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. - - - -## cluster [_cluster] +#### Request (object) [_request_ccr.unfollow] +- **`index` (string)**: The name of the follower index. +- **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for a connection to the master node. +If the master node is not available before the timeout expires, the request fails and returns an error. +It can also be set to `-1` to indicate that the request should never timeout. +## client.cluster.allocationExplain [_cluster.allocation_explain] +Explain the shard allocations. +Get explanations for shard allocations in the cluster. +For unassigned shards, it provides an explanation for why the shard is unassigned. +For assigned shards, it provides an explanation for why the shard is remaining on its current node and has not moved or rebalanced to another node. +This API can be very useful when attempting to diagnose why a shard is unassigned or why a shard continues to remain on its current node when you might expect otherwise. -### allocation_explain [_allocation_explain] - -Explain the shard allocations. Get explanations for shard allocations in the cluster. For unassigned shards, it provides an explanation for why the shard is unassigned. For assigned shards, it provides an explanation for why the shard is remaining on its current node and has not moved or rebalanced to another node. This API can be very useful when attempting to diagnose why a shard is unassigned or why a shard continues to remain on its current node when you might expect otherwise. - -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-allocation-explain) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-cluster-allocation-explain) ```ts client.cluster.allocationExplain({ ... 
}) ``` +### Arguments [_arguments_cluster.allocation_explain] -### Arguments [_arguments_87] - -* **Request (object):** - - * **`current_node` (Optional, string)**: Specifies the node ID or the name of the node to only explain a shard that is currently located on the specified node. - * **`index` (Optional, string)**: Specifies the name of the index that you would like an explanation for. - * **`primary` (Optional, boolean)**: If true, returns explanation for the primary shard for the given shard ID. - * **`shard` (Optional, number)**: Specifies the ID of the shard that you would like an explanation for. - * **`include_disk_info` (Optional, boolean)**: If true, returns information about disk usage and shard sizes. - * **`include_yes_decisions` (Optional, boolean)**: If true, returns YES decisions in explanation. - * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. +#### Request (object) [_request_cluster.allocation_explain] +- **`current_node` (Optional, string)**: Specifies the node ID or the name of the node to only explain a shard that is currently located on the specified node. +- **`index` (Optional, string)**: Specifies the name of the index that you would like an explanation for. +- **`primary` (Optional, boolean)**: If true, returns explanation for the primary shard for the given shard ID. +- **`shard` (Optional, number)**: Specifies the ID of the shard that you would like an explanation for. +- **`include_disk_info` (Optional, boolean)**: If true, returns information about disk usage and shard sizes. +- **`include_yes_decisions` (Optional, boolean)**: If true, returns YES decisions in explanation. +- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. +## client.cluster.deleteComponentTemplate [_cluster.delete_component_template] +Delete component templates. +Component templates are building blocks for constructing index templates that specify index mappings, settings, and aliases. - -### delete_component_template [_delete_component_template] - -Delete component templates. Component templates are building blocks for constructing index templates that specify index mappings, settings, and aliases. - -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-put-component-template) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-cluster-put-component-template) ```ts client.cluster.deleteComponentTemplate({ name }) ``` +### Arguments [_arguments_cluster.delete_component_template] -### Arguments [_arguments_88] - -* **Request (object):** +#### Request (object) [_request_cluster.delete_component_template] +- **`name` (string | string[])**: List or wildcard expression of component template names used to limit the request. +- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. +- **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. +If no response is received before the timeout expires, the request fails and returns an error. - * **`name` (string | string[])**: List or wildcard expression of component template names used to limit the request. - * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. 
If no response is received before the timeout expires, the request fails and returns an error. - * **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. +## client.cluster.deleteVotingConfigExclusions [_cluster.delete_voting_config_exclusions] +Clear cluster voting config exclusions. +Remove master-eligible nodes from the voting configuration exclusion list. - - -### delete_voting_config_exclusions [_delete_voting_config_exclusions] - -Clear cluster voting config exclusions. Remove master-eligible nodes from the voting configuration exclusion list. - -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-post-voting-config-exclusions) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-cluster-post-voting-config-exclusions) ```ts client.cluster.deleteVotingConfigExclusions({ ... }) ``` +### Arguments [_arguments_cluster.delete_voting_config_exclusions] -### Arguments [_arguments_89] - -* **Request (object):** - - * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. - * **`wait_for_removal` (Optional, boolean)**: Specifies whether to wait for all excluded nodes to be removed from the cluster before clearing the voting configuration exclusions list. Defaults to true, meaning that all excluded nodes must be removed from the cluster before this API takes any action. If set to false then the voting configuration exclusions list is cleared even if some excluded nodes are still in the cluster. - - +#### Request (object) [_request_cluster.delete_voting_config_exclusions] +- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. +- **`wait_for_removal` (Optional, boolean)**: Specifies whether to wait for all excluded nodes to be removed from the +cluster before clearing the voting configuration exclusions list. +Defaults to true, meaning that all excluded nodes must be removed from +the cluster before this API takes any action. If set to false then the +voting configuration exclusions list is cleared even if some excluded +nodes are still in the cluster. -### exists_component_template [_exists_component_template] +## client.cluster.existsComponentTemplate [_cluster.exists_component_template] +Check component templates. +Returns information about whether a particular component template exists. -Check component templates. Returns information about whether a particular component template exists. - -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-put-component-template) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-cluster-put-component-template) ```ts client.cluster.existsComponentTemplate({ name }) ``` +### Arguments [_arguments_cluster.exists_component_template] -### Arguments [_arguments_90] - -* **Request (object):** - - * **`name` (string | string[])**: List of component template names used to limit the request. Wildcard (*) expressions are supported. - * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. - * **`local` (Optional, boolean)**: If true, the request retrieves information from the local node only. 
Defaults to false, which means information is retrieved from the master node. - +#### Request (object) [_request_cluster.exists_component_template] +- **`name` (string | string[])**: List of component template names used to limit the request. +Wildcard (*) expressions are supported. +- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is +received before the timeout expires, the request fails and returns an +error. +- **`local` (Optional, boolean)**: If true, the request retrieves information from the local node only. +Defaults to false, which means information is retrieved from the master node. +## client.cluster.getComponentTemplate [_cluster.get_component_template] +Get component templates. +Get information about component templates. -### get_component_template [_get_component_template] - -Get component templates. Get information about component templates. - -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-put-component-template) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-cluster-put-component-template) ```ts client.cluster.getComponentTemplate({ ... }) ``` +### Arguments [_arguments_cluster.get_component_template] -### Arguments [_arguments_91] - -* **Request (object):** +#### Request (object) [_request_cluster.get_component_template] +- **`name` (Optional, string)**: List of component template names used to limit the request. +Wildcard (`*`) expressions are supported. +- **`flat_settings` (Optional, boolean)**: If `true`, returns settings in flat format. +- **`include_defaults` (Optional, boolean)**: Return all default configurations for the component template (default: false) +- **`local` (Optional, boolean)**: If `true`, the request retrieves information from the local node only. +If `false`, information is retrieved from the master node. +- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. - * **`name` (Optional, string)**: List of component template names used to limit the request. Wildcard (`*`) expressions are supported. - * **`flat_settings` (Optional, boolean)**: If `true`, returns settings in flat format. - * **`include_defaults` (Optional, boolean)**: Return all default configurations for the component template (default: false) - * **`local` (Optional, boolean)**: If `true`, the request retrieves information from the local node only. If `false`, information is retrieved from the master node. - * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. +## client.cluster.getSettings [_cluster.get_settings] +Get cluster-wide settings. +By default, it returns only settings that have been explicitly defined. - - -### get_settings [_get_settings] - -Get cluster-wide settings. By default, it returns only settings that have been explicitly defined. - -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-get-settings) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-cluster-get-settings) ```ts client.cluster.getSettings({ ... 
}) ``` +### Arguments [_arguments_cluster.get_settings] -### Arguments [_arguments_92] - -* **Request (object):** - - * **`flat_settings` (Optional, boolean)**: If `true`, returns settings in flat format. - * **`include_defaults` (Optional, boolean)**: If `true`, returns default cluster settings from the local node. - * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. - * **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. - - +#### Request (object) [_request_cluster.get_settings] +- **`flat_settings` (Optional, boolean)**: If `true`, returns settings in flat format. +- **`include_defaults` (Optional, boolean)**: If `true`, returns default cluster settings from the local node. +- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. +- **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. +If no response is received before the timeout expires, the request fails and returns an error. -### health [_health_2] +## client.cluster.health [_cluster.health] +Get the cluster health status. -Get the cluster health status. You can also use the API to get the health status of only specified data streams and indices. For data streams, the API retrieves the health status of the stream’s backing indices. +You can also use the API to get the health status of only specified data streams and indices. +For data streams, the API retrieves the health status of the stream’s backing indices. -The cluster health status is: green, yellow or red. On the shard level, a red status indicates that the specific shard is not allocated in the cluster. Yellow means that the primary shard is allocated but replicas are not. Green means that all shards are allocated. The index level status is controlled by the worst shard status. +The cluster health status is: green, yellow or red. +On the shard level, a red status indicates that the specific shard is not allocated in the cluster. Yellow means that the primary shard is allocated but replicas are not. Green means that all shards are allocated. +The index level status is controlled by the worst shard status. -One of the main benefits of the API is the ability to wait until the cluster reaches a certain high watermark health level. The cluster status is controlled by the worst index status. +One of the main benefits of the API is the ability to wait until the cluster reaches a certain high watermark health level. +The cluster status is controlled by the worst index status. -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-health) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-cluster-health) ```ts client.cluster.health({ ... }) ``` +### Arguments [_arguments_cluster.health] -### Arguments [_arguments_93] - -* **Request (object):** - - * **`index` (Optional, string | string[])**: List of data streams, indices, and index aliases used to limit the request. Wildcard expressions (`*`) are supported. To target all data streams and indices in a cluster, omit this parameter or use _all or `*`. 
- * **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Whether to expand wildcard expression to concrete indices that are open, closed or both. - * **`level` (Optional, Enum("cluster" | "indices" | "shards"))**: Can be one of cluster, indices or shards. Controls the details level of the health information returned. - * **`local` (Optional, boolean)**: If true, the request retrieves information from the local node only. Defaults to false, which means information is retrieved from the master node. - * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. - * **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. - * **`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))**: A number controlling to how many active shards to wait for, all to wait for all shards in the cluster to be active, or 0 to not wait. - * **`wait_for_events` (Optional, Enum("immediate" | "urgent" | "high" | "normal" | "low" | "languid"))**: Can be one of immediate, urgent, high, normal, low, languid. Wait until all currently queued events with the given priority are processed. - * **`wait_for_nodes` (Optional, string | number)**: The request waits until the specified number N of nodes is available. It also accepts >=N, ⇐N, >N and yellow > red. By default, will not wait for any status. - +#### Request (object) [_request_cluster.health] +- **`index` (Optional, string | string[])**: List of data streams, indices, and index aliases used to limit the request. Wildcard expressions (`*`) are supported. To target all data streams and indices in a cluster, omit this parameter or use _all or `*`. +- **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Whether to expand wildcard expression to concrete indices that are open, closed or both. +- **`level` (Optional, Enum("cluster" | "indices" | "shards"))**: Can be one of cluster, indices or shards. Controls the details level of the health information returned. +- **`local` (Optional, boolean)**: If true, the request retrieves information from the local node only. Defaults to false, which means information is retrieved from the master node. +- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. +- **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. +- **`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))**: A number controlling to how many active shards to wait for, all to wait for all shards in the cluster to be active, or 0 to not wait. +- **`wait_for_events` (Optional, Enum("immediate" | "urgent" | "high" | "normal" | "low" | "languid"))**: Can be one of immediate, urgent, high, normal, low, languid. Wait until all currently queued events with the given priority are processed. +- **`wait_for_nodes` (Optional, string | number)**: The request waits until the specified number N of nodes is available. It also accepts >=N, <=N, >N and yellow > red. 
By default, will not wait for any status. +## client.cluster.info [_cluster.info] +Get cluster info. +Returns basic information about the cluster. -### info [_info_2] - -Get cluster info. Returns basic information about the cluster. - -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-info) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-cluster-info) ```ts client.cluster.info({ target }) ``` +### Arguments [_arguments_cluster.info] -### Arguments [_arguments_94] - -* **Request (object):** - - * **`target` (Enum("_all" | "http" | "ingest" | "thread_pool" | "script") | Enum("_all" | "http" | "ingest" | "thread_pool" | "script")[])**: Limits the information returned to the specific target. Supports a list, such as http,ingest. +#### Request (object) [_request_cluster.info] +- **`target` (Enum("_all" | "http" | "ingest" | "thread_pool" | "script") | Enum("_all" | "http" | "ingest" | "thread_pool" | "script")[])**: Limits the information returned to the specific target. Supports a list, such as http,ingest. +## client.cluster.pendingTasks [_cluster.pending_tasks] +Get the pending cluster tasks. +Get information about cluster-level changes (such as create index, update mapping, allocate or fail shard) that have not yet taken effect. +NOTE: This API returns a list of any pending updates to the cluster state. +These are distinct from the tasks reported by the task management API which include periodic tasks and tasks initiated by the user, such as node stats, search queries, or create index requests. +However, if a user-initiated task such as a create index command causes a cluster state update, the activity of this task might be reported by both task api and pending cluster tasks API. -### pending_tasks [_pending_tasks_2] - -Get the pending cluster tasks. Get information about cluster-level changes (such as create index, update mapping, allocate or fail shard) that have not yet taken effect. - -::::{note} -This API returns a list of any pending updates to the cluster state. These are distinct from the tasks reported by the task management API which include periodic tasks and tasks initiated by the user, such as node stats, search queries, or create index requests. However, if a user-initiated task such as a create index command causes a cluster state update, the activity of this task might be reported by both task api and pending cluster tasks API. -:::: - - -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-pending-tasks) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-cluster-pending-tasks) ```ts client.cluster.pendingTasks({ ... }) ``` +### Arguments [_arguments_cluster.pending_tasks] -### Arguments [_arguments_95] - -* **Request (object):** - - * **`local` (Optional, boolean)**: If `true`, the request retrieves information from the local node only. If `false`, information is retrieved from the master node. - * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. - - +#### Request (object) [_request_cluster.pending_tasks] +- **`local` (Optional, boolean)**: If `true`, the request retrieves information from the local node only. +If `false`, information is retrieved from the master node. 
+- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. -### post_voting_config_exclusions [_post_voting_config_exclusions] +## client.cluster.postVotingConfigExclusions [_cluster.post_voting_config_exclusions] +Update voting configuration exclusions. +Update the cluster voting config exclusions by node IDs or node names. +By default, if there are more than three master-eligible nodes in the cluster and you remove fewer than half of the master-eligible nodes in the cluster at once, the voting configuration automatically shrinks. +If you want to shrink the voting configuration to contain fewer than three nodes or to remove half or more of the master-eligible nodes in the cluster at once, use this API to remove departing nodes from the voting configuration manually. +The API adds an entry for each specified node to the cluster’s voting configuration exclusions list. +It then waits until the cluster has reconfigured its voting configuration to exclude the specified nodes. -Update voting configuration exclusions. Update the cluster voting config exclusions by node IDs or node names. By default, if there are more than three master-eligible nodes in the cluster and you remove fewer than half of the master-eligible nodes in the cluster at once, the voting configuration automatically shrinks. If you want to shrink the voting configuration to contain fewer than three nodes or to remove half or more of the master-eligible nodes in the cluster at once, use this API to remove departing nodes from the voting configuration manually. The API adds an entry for each specified node to the cluster’s voting configuration exclusions list. It then waits until the cluster has reconfigured its voting configuration to exclude the specified nodes. +Clusters should have no voting configuration exclusions in normal operation. +Once the excluded nodes have stopped, clear the voting configuration exclusions with `DELETE /_cluster/voting_config_exclusions`. +This API waits for the nodes to be fully removed from the cluster before it returns. +If your cluster has voting configuration exclusions for nodes that you no longer intend to remove, use `DELETE /_cluster/voting_config_exclusions?wait_for_removal=false` to clear the voting configuration exclusions without waiting for the nodes to leave the cluster. -Clusters should have no voting configuration exclusions in normal operation. Once the excluded nodes have stopped, clear the voting configuration exclusions with `DELETE /_cluster/voting_config_exclusions`. This API waits for the nodes to be fully removed from the cluster before it returns. If your cluster has voting configuration exclusions for nodes that you no longer intend to remove, use `DELETE /_cluster/voting_config_exclusions?wait_for_removal=false` to clear the voting configuration exclusions without waiting for the nodes to leave the cluster. +A response to `POST /_cluster/voting_config_exclusions` with an HTTP status code of 200 OK guarantees that the node has been removed from the voting configuration and will not be reinstated until the voting configuration exclusions are cleared by calling `DELETE /_cluster/voting_config_exclusions`. +If the call to `POST /_cluster/voting_config_exclusions` fails or returns a response with an HTTP status code other than 200 OK then the node may not have been removed from the voting configuration. 
+In that case, you may safely retry the call. -A response to `POST /_cluster/voting_config_exclusions` with an HTTP status code of 200 OK guarantees that the node has been removed from the voting configuration and will not be reinstated until the voting configuration exclusions are cleared by calling `DELETE /_cluster/voting_config_exclusions`. If the call to `POST /_cluster/voting_config_exclusions` fails or returns a response with an HTTP status code other than 200 OK then the node may not have been removed from the voting configuration. In that case, you may safely retry the call. +NOTE: Voting exclusions are required only when you remove at least half of the master-eligible nodes from a cluster in a short time period. +They are not required when removing master-ineligible nodes or when removing fewer than half of the master-eligible nodes. -::::{note} -Voting exclusions are required only when you remove at least half of the master-eligible nodes from a cluster in a short time period. They are not required when removing master-ineligible nodes or when removing fewer than half of the master-eligible nodes. -:::: - - -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-post-voting-config-exclusions) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-cluster-post-voting-config-exclusions) ```ts client.cluster.postVotingConfigExclusions({ ... }) ``` +### Arguments [_arguments_cluster.post_voting_config_exclusions] -### Arguments [_arguments_96] - -* **Request (object):** - - * **`node_names` (Optional, string | string[])**: A list of the names of the nodes to exclude from the voting configuration. If specified, you may not also specify node_ids. - * **`node_ids` (Optional, string | string[])**: A list of the persistent ids of the nodes to exclude from the voting configuration. If specified, you may not also specify node_names. - * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. - * **`timeout` (Optional, string | -1 | 0)**: When adding a voting configuration exclusion, the API waits for the specified nodes to be excluded from the voting configuration before returning. If the timeout expires before the appropriate condition is satisfied, the request fails and returns an error. +#### Request (object) [_request_cluster.post_voting_config_exclusions] +- **`node_names` (Optional, string | string[])**: A list of the names of the nodes to exclude from the +voting configuration. If specified, you may not also specify node_ids. +- **`node_ids` (Optional, string | string[])**: A list of the persistent ids of the nodes to exclude +from the voting configuration. If specified, you may not also specify node_names. +- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. +- **`timeout` (Optional, string | -1 | 0)**: When adding a voting configuration exclusion, the API waits for the +specified nodes to be excluded from the voting configuration before +returning. If the timeout expires before the appropriate condition +is satisfied, the request fails and returns an error. +## client.cluster.putComponentTemplate [_cluster.put_component_template] +Create or update a component template. +Component templates are building blocks for constructing index templates that specify index mappings, settings, and aliases. - -### put_component_template [_put_component_template] - -Create or update a component template. 
Component templates are building blocks for constructing index templates that specify index mappings, settings, and aliases. - -An index template can be composed of multiple component templates. To use a component template, specify it in an index template’s `composed_of` list. Component templates are only applied to new data streams and indices as part of a matching index template. +An index template can be composed of multiple component templates. +To use a component template, specify it in an index template’s `composed_of` list. +Component templates are only applied to new data streams and indices as part of a matching index template. Settings and mappings specified directly in the index template or the create index request override any settings or mappings specified in a component template. -Component templates are only used during index creation. For data streams, this includes data stream creation and the creation of a stream’s backing indices. Changes to component templates do not affect existing indices, including a stream’s backing indices. +Component templates are only used during index creation. +For data streams, this includes data stream creation and the creation of a stream’s backing indices. +Changes to component templates do not affect existing indices, including a stream’s backing indices. -You can use C-style `/* *\/` block comments in component templates. You can include comments anywhere in the request body except before the opening curly bracket. +You can use C-style `/* *\/` block comments in component templates. +You can include comments anywhere in the request body except before the opening curly bracket. **Applying component templates** -You cannot directly apply a component template to a data stream or index. To be applied, a component template must be included in an index template’s `composed_of` list. +You cannot directly apply a component template to a data stream or index. +To be applied, a component template must be included in an index template's `composed_of` list. -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-put-component-template) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-cluster-put-component-template) ```ts client.cluster.putComponentTemplate({ name, template }) ``` +### Arguments [_arguments_cluster.put_component_template] -### Arguments [_arguments_97] - -* **Request (object):** +#### Request (object) [_request_cluster.put_component_template] +- **`name` (string)**: Name of the component template to create. +Elasticsearch includes the following built-in component templates: `logs-mappings`; `logs-settings`; `metrics-mappings`; `metrics-settings`;`synthetics-mapping`; `synthetics-settings`. +Elastic Agent uses these templates to configure backing indices for its data streams. +If you use Elastic Agent and want to overwrite one of these templates, set the `version` for your replacement template higher than the current version. +If you don’t use Elastic Agent and want to disable all built-in component and index templates, set `stack.templates.enabled` to `false` using the cluster update settings API. +- **`template` ({ aliases, mappings, settings, defaults, data_stream, lifecycle })**: The template to be applied which includes mappings, settings, or aliases configuration. +- **`version` (Optional, number)**: Version number used to manage component templates externally. 
+This number isn't automatically generated or incremented by Elasticsearch. +To unset a version, replace the template without specifying a version. +- **`_meta` (Optional, Record)**: Optional user metadata about the component template. +It may have any contents. This map is not automatically generated by Elasticsearch. +This information is stored in the cluster state, so keeping it short is preferable. +To unset `_meta`, replace the template without specifying this information. +- **`deprecated` (Optional, boolean)**: Marks this index template as deprecated. When creating or updating a non-deprecated index template +that uses deprecated components, Elasticsearch will emit a deprecation warning. +- **`create` (Optional, boolean)**: If `true`, this request cannot replace or update existing component templates. +- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. - * **`name` (string)**: Name of the component template to create. Elasticsearch includes the following built-in component templates: `logs-mappings`; `logs-settings`; `metrics-mappings`; `metrics-settings`;`synthetics-mapping`; `synthetics-settings`. Elastic Agent uses these templates to configure backing indices for its data streams. If you use Elastic Agent and want to overwrite one of these templates, set the `version` for your replacement template higher than the current version. If you don’t use Elastic Agent and want to disable all built-in component and index templates, set `stack.templates.enabled` to `false` using the cluster update settings API. - * **`template` ({ aliases, mappings, settings, defaults, data_stream, lifecycle })**: The template to be applied which includes mappings, settings, or aliases configuration. - * **`version` (Optional, number)**: Version number used to manage component templates externally. This number isn’t automatically generated or incremented by Elasticsearch. To unset a version, replace the template without specifying a version. - * **`_meta` (Optional, Record)**: Optional user metadata about the component template. It may have any contents. This map is not automatically generated by Elasticsearch. This information is stored in the cluster state, so keeping it short is preferable. To unset `_meta`, replace the template without specifying this information. - * **`deprecated` (Optional, boolean)**: Marks this index template as deprecated. When creating or updating a non-deprecated index template that uses deprecated components, Elasticsearch will emit a deprecation warning. - * **`create` (Optional, boolean)**: If `true`, this request cannot replace or update existing component templates. - * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. +## client.cluster.putSettings [_cluster.put_settings] +Update the cluster settings. +Configure and update dynamic settings on a running cluster. +You can also configure dynamic settings locally on an unstarted or shut down node in `elasticsearch.yml`. +Updates made with this API can be persistent, which apply across cluster restarts, or transient, which reset after a cluster restart. +You can also reset transient or persistent settings by assigning them a null value. 
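As a usage sketch with the JavaScript client, a persistent update followed by a null reset might look like this (the node URL, API key, and the `indices.recovery.max_bytes_per_sec` setting are illustrative):

```ts
import { Client } from '@elastic/elasticsearch'

// Illustrative connection details; adjust for your cluster.
const client = new Client({
  node: 'https://localhost:9200/',
  auth: { apiKey: 'your-api-key' }
})

// Persist a dynamic setting so it survives cluster restarts.
const response = await client.cluster.putSettings({
  persistent: { 'indices.recovery.max_bytes_per_sec': '50mb' }
})
console.log(response.acknowledged)

// Reset the setting later by assigning it a null value, as described above.
await client.cluster.putSettings({
  persistent: { 'indices.recovery.max_bytes_per_sec': null }
})
```

The connector sketches further below assume a `client` configured the same way.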
-### put_settings [_put_settings] +If you configure the same setting using multiple methods, Elasticsearch applies the settings in following order of precedence: 1) Transient setting; 2) Persistent setting; 3) `elasticsearch.yml` setting; 4) Default setting value. +For example, you can apply a transient setting to override a persistent setting or `elasticsearch.yml` setting. +However, a change to an `elasticsearch.yml` setting will not override a defined transient or persistent setting. -Update the cluster settings. Configure and update dynamic settings on a running cluster. You can also configure dynamic settings locally on an unstarted or shut down node in `elasticsearch.yml`. +TIP: In Elastic Cloud, use the user settings feature to configure all cluster settings. This method automatically rejects unsafe settings that could break your cluster. +If you run Elasticsearch on your own hardware, use this API to configure dynamic cluster settings. +Only use `elasticsearch.yml` for static cluster settings and node settings. +The API doesn’t require a restart and ensures a setting’s value is the same on all nodes. -Updates made with this API can be persistent, which apply across cluster restarts, or transient, which reset after a cluster restart. You can also reset transient or persistent settings by assigning them a null value. +WARNING: Transient cluster settings are no longer recommended. Use persistent cluster settings instead. +If a cluster becomes unstable, transient settings can clear unexpectedly, resulting in a potentially undesired cluster configuration. -If you configure the same setting using multiple methods, Elasticsearch applies the settings in following order of precedence: 1) Transient setting; 2) Persistent setting; 3) `elasticsearch.yml` setting; 4) Default setting value. For example, you can apply a transient setting to override a persistent setting or `elasticsearch.yml` setting. However, a change to an `elasticsearch.yml` setting will not override a defined transient or persistent setting. - -::::{tip} -In Elastic Cloud, use the user settings feature to configure all cluster settings. This method automatically rejects unsafe settings that could break your cluster. If you run Elasticsearch on your own hardware, use this API to configure dynamic cluster settings. Only use `elasticsearch.yml` for static cluster settings and node settings. The API doesn’t require a restart and ensures a setting’s value is the same on all nodes. -:::: - - -::::{warning} -Transient cluster settings are no longer recommended. Use persistent cluster settings instead. If a cluster becomes unstable, transient settings can clear unexpectedly, resulting in a potentially undesired cluster configuration. -:::: - - -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-put-settings) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-cluster-put-settings) ```ts client.cluster.putSettings({ ... 
}) ``` +### Arguments [_arguments_cluster.put_settings] -### Arguments [_arguments_98] - -* **Request (object):** - - * **`persistent` (Optional, Record)** - * **`transient` (Optional, Record)** - * **`flat_settings` (Optional, boolean)**: Return settings in flat format (default: false) - * **`master_timeout` (Optional, string | -1 | 0)**: Explicit operation timeout for connection to master node - * **`timeout` (Optional, string | -1 | 0)**: Explicit operation timeout - +#### Request (object) [_request_cluster.put_settings] +- **`persistent` (Optional, Record)** +- **`transient` (Optional, Record)** +- **`flat_settings` (Optional, boolean)**: Return settings in flat format (default: false) +- **`master_timeout` (Optional, string | -1 | 0)**: Explicit operation timeout for connection to master node +- **`timeout` (Optional, string | -1 | 0)**: Explicit operation timeout +## client.cluster.remoteInfo [_cluster.remote_info] +Get remote cluster information. -### remote_info [_remote_info] +Get information about configured remote clusters. +The API returns connection and endpoint information keyed by the configured remote cluster alias. -Get remote cluster information. Get all of the configured remote cluster information. This API returns connection and endpoint information keyed by the configured remote cluster alias. +> info +> This API returns information that reflects current state on the local cluster. +> The `connected` field does not necessarily reflect whether a remote cluster is down or unavailable, only whether there is currently an open connection to it. +> Elasticsearch does not spontaneously try to reconnect to a disconnected remote cluster. +> To trigger a reconnection, attempt a cross-cluster search, ES|QL cross-cluster search, or try the [resolve cluster endpoint](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-resolve-cluster). -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-remote-info) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-cluster-remote-info) ```ts client.cluster.remoteInfo() ``` -### reroute [_reroute] +## client.cluster.reroute [_cluster.reroute] +Reroute the cluster. +Manually change the allocation of individual shards in the cluster. +For example, a shard can be moved from one node to another explicitly, an allocation can be canceled, and an unassigned shard can be explicitly allocated to a specific node. -Reroute the cluster. Manually change the allocation of individual shards in the cluster. For example, a shard can be moved from one node to another explicitly, an allocation can be canceled, and an unassigned shard can be explicitly allocated to a specific node. +It is important to note that after processing any reroute commands Elasticsearch will perform rebalancing as normal (respecting the values of settings such as `cluster.routing.rebalance.enable`) in order to remain in a balanced state. +For example, if the requested allocation includes moving a shard from node1 to node2 then this may cause a shard to be moved from node2 back to node1 to even things out. -It is important to note that after processing any reroute commands Elasticsearch will perform rebalancing as normal (respecting the values of settings such as `cluster.routing.rebalance.enable`) in order to remain in a balanced state. 
For example, if the requested allocation includes moving a shard from node1 to node2 then this may cause a shard to be moved from node2 back to node1 to even things out. +The cluster can be set to disable allocations using the `cluster.routing.allocation.enable` setting. +If allocations are disabled then the only allocations that will be performed are explicit ones given using the reroute command, and consequent allocations due to rebalancing. -The cluster can be set to disable allocations using the `cluster.routing.allocation.enable` setting. If allocations are disabled then the only allocations that will be performed are explicit ones given using the reroute command, and consequent allocations due to rebalancing. - -The cluster will attempt to allocate a shard a maximum of `index.allocation.max_retries` times in a row (defaults to `5`), before giving up and leaving the shard unallocated. This scenario can be caused by structural problems such as having an analyzer which refers to a stopwords file which doesn’t exist on all nodes. +The cluster will attempt to allocate a shard a maximum of `index.allocation.max_retries` times in a row (defaults to `5`), before giving up and leaving the shard unallocated. +This scenario can be caused by structural problems such as having an analyzer which refers to a stopwords file which doesn’t exist on all nodes. Once the problem has been corrected, allocation can be manually retried by calling the reroute API with the `?retry_failed` URI query parameter, which will attempt a single retry round for these shards. -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-reroute) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-cluster-reroute) ```ts client.cluster.reroute({ ... }) ``` +### Arguments [_arguments_cluster.reroute] -### Arguments [_arguments_99] - -* **Request (object):** - - * **`commands` (Optional, { cancel, move, allocate_replica, allocate_stale_primary, allocate_empty_primary }[])**: Defines the commands to perform. - * **`dry_run` (Optional, boolean)**: If true, then the request simulates the operation. It will calculate the result of applying the commands to the current cluster state and return the resulting cluster state after the commands (and rebalancing) have been applied; it will not actually perform the requested changes. - * **`explain` (Optional, boolean)**: If true, then the response contains an explanation of why the commands can or cannot run. - * **`metric` (Optional, string | string[])**: Limits the information returned to the specified metrics. - * **`retry_failed` (Optional, boolean)**: If true, then retries allocation of shards that are blocked due to too many subsequent allocation failures. - * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. - * **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. - +#### Request (object) [_request_cluster.reroute] +- **`commands` (Optional, { cancel, move, allocate_replica, allocate_stale_primary, allocate_empty_primary }[])**: Defines the commands to perform. +- **`dry_run` (Optional, boolean)**: If true, then the request simulates the operation. 
+It will calculate the result of applying the commands to the current cluster state and return the resulting cluster state after the commands (and rebalancing) have been applied; it will not actually perform the requested changes. +- **`explain` (Optional, boolean)**: If true, then the response contains an explanation of why the commands can or cannot run. +- **`metric` (Optional, string | string[])**: Limits the information returned to the specified metrics. +- **`retry_failed` (Optional, boolean)**: If true, then retries allocation of shards that are blocked due to too many subsequent allocation failures. +- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. +- **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. - -### state [_state] - -Get the cluster state. Get comprehensive information about the state of the cluster. +## client.cluster.state [_cluster.state] +Get the cluster state. +Get comprehensive information about the state of the cluster. The cluster state is an internal data structure which keeps track of a variety of information needed by every node, including the identity and attributes of the other nodes in the cluster; cluster-wide settings; index metadata, including the mapping and settings for each index; the location and status of every shard copy in the cluster. -The elected master node ensures that every node in the cluster has a copy of the same cluster state. This API lets you retrieve a representation of this internal state for debugging or diagnostic purposes. You may need to consult the Elasticsearch source code to determine the precise meaning of the response. +The elected master node ensures that every node in the cluster has a copy of the same cluster state. +This API lets you retrieve a representation of this internal state for debugging or diagnostic purposes. +You may need to consult the Elasticsearch source code to determine the precise meaning of the response. -By default the API will route requests to the elected master node since this node is the authoritative source of cluster states. You can also retrieve the cluster state held on the node handling the API request by adding the `?local=true` query parameter. +By default the API will route requests to the elected master node since this node is the authoritative source of cluster states. +You can also retrieve the cluster state held on the node handling the API request by adding the `?local=true` query parameter. -Elasticsearch may need to expend significant effort to compute a response to this API in larger clusters, and the response may comprise a very large quantity of data. If you use this API repeatedly, your cluster may become unstable. +Elasticsearch may need to expend significant effort to compute a response to this API in larger clusters, and the response may comprise a very large quantity of data. +If you use this API repeatedly, your cluster may become unstable. -::::{warning} -The response is a representation of an internal data structure. Its format is not subject to the same compatibility guarantees as other more stable APIs and may change from version to version. Do not query this API using external monitoring tools. Instead, obtain the information you require using other more stable cluster APIs. 
-:::: +WARNING: The response is a representation of an internal data structure. +Its format is not subject to the same compatibility guarantees as other more stable APIs and may change from version to version. +Do not query this API using external monitoring tools. +Instead, obtain the information you require using other more stable cluster APIs. - -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-state) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-cluster-state) ```ts client.cluster.state({ ... }) ``` +### Arguments [_arguments_cluster.state] -### Arguments [_arguments_100] - -* **Request (object):** +#### Request (object) [_request_cluster.state] +- **`metric` (Optional, string | string[])**: Limit the information returned to the specified metrics +- **`index` (Optional, string | string[])**: A list of index names; use `_all` or empty string to perform the operation on all indices +- **`allow_no_indices` (Optional, boolean)**: Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified) +- **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Whether to expand wildcard expression to concrete indices that are open, closed or both. +- **`flat_settings` (Optional, boolean)**: Return settings in flat format (default: false) +- **`ignore_unavailable` (Optional, boolean)**: Whether specified concrete indices should be ignored when unavailable (missing or closed) +- **`local` (Optional, boolean)**: Return local information, do not retrieve the state from master node (default: false) +- **`master_timeout` (Optional, string | -1 | 0)**: Specify timeout for connection to master +- **`wait_for_metadata_version` (Optional, number)**: Wait for the metadata version to be equal or greater than the specified metadata version +- **`wait_for_timeout` (Optional, string | -1 | 0)**: The maximum time to wait for wait_for_metadata_version before timing out - * **`metric` (Optional, string | string[])**: Limit the information returned to the specified metrics - * **`index` (Optional, string | string[])**: A list of index names; use `_all` or empty string to perform the operation on all indices - * **`allow_no_indices` (Optional, boolean)**: Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified) - * **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Whether to expand wildcard expression to concrete indices that are open, closed or both. 
- * **`flat_settings` (Optional, boolean)**: Return settings in flat format (default: false) - * **`ignore_unavailable` (Optional, boolean)**: Whether specified concrete indices should be ignored when unavailable (missing or closed) - * **`local` (Optional, boolean)**: Return local information, do not retrieve the state from master node (default: false) - * **`master_timeout` (Optional, string | -1 | 0)**: Specify timeout for connection to master - * **`wait_for_metadata_version` (Optional, number)**: Wait for the metadata version to be equal or greater than the specified metadata version - * **`wait_for_timeout` (Optional, string | -1 | 0)**: The maximum time to wait for wait_for_metadata_version before timing out +## client.cluster.stats [_cluster.stats] +Get cluster statistics. +Get basic index metrics (shard numbers, store size, memory usage) and information about the current nodes that form the cluster (number, roles, os, jvm versions, memory usage, cpu and installed plugins). - - -### stats [_stats_2] - -Get cluster statistics. Get basic index metrics (shard numbers, store size, memory usage) and information about the current nodes that form the cluster (number, roles, os, jvm versions, memory usage, cpu and installed plugins). - -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-stats) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-cluster-stats) ```ts client.cluster.stats({ ... }) ``` +### Arguments [_arguments_cluster.stats] -### Arguments [_arguments_101] - -* **Request (object):** - - * **`node_id` (Optional, string | string[])**: List of node filters used to limit returned information. Defaults to all nodes in the cluster. - * **`include_remotes` (Optional, boolean)**: Include remote cluster data into the response - * **`timeout` (Optional, string | -1 | 0)**: Period to wait for each node to respond. If a node does not respond before its timeout expires, the response does not include its stats. However, timed out nodes are included in the response’s `_nodes.failed` property. Defaults to no timeout. - - - -## connector [_connector] - - -### check_in [_check_in] +#### Request (object) [_request_cluster.stats] +- **`node_id` (Optional, string | string[])**: List of node filters used to limit returned information. Defaults to all nodes in the cluster. +- **`include_remotes` (Optional, boolean)**: Include remote cluster data into the response +- **`timeout` (Optional, string | -1 | 0)**: Period to wait for each node to respond. +If a node does not respond before its timeout expires, the response does not include its stats. +However, timed out nodes are included in the response’s `_nodes.failed` property. Defaults to no timeout. +## client.connector.checkIn [_connector.check_in] Check in a connector. Update the `last_seen` field in the connector and set it to the current timestamp. 
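A minimal usage sketch (the connector ID is a placeholder):

```ts
// Record that the connector is alive by bumping its last_seen timestamp.
const response = await client.connector.checkIn({ connector_id: 'my-connector' })
console.log(response.result)
```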
-[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-check-in) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-connector-check-in) ```ts client.connector.checkIn({ connector_id }) ``` +### Arguments [_arguments_connector.check_in] -### Arguments [_arguments_102] - -* **Request (object):** - - * **`connector_id` (string)**: The unique identifier of the connector to be checked in - - - -### delete [_delete_3] +#### Request (object) [_request_connector.check_in] +- **`connector_id` (string)**: The unique identifier of the connector to be checked in +## client.connector.delete [_connector.delete] Delete a connector. -Removes a connector and associated sync jobs. This is a destructive action that is not recoverable. NOTE: This action doesn’t delete any API keys, ingest pipelines, or data indices associated with the connector. These need to be removed manually. +Removes a connector and associated sync jobs. +This is a destructive action that is not recoverable. +NOTE: This action doesn’t delete any API keys, ingest pipelines, or data indices associated with the connector. +These need to be removed manually. -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-delete) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-connector-delete) ```ts client.connector.delete({ connector_id }) ``` +### Arguments [_arguments_connector.delete] -### Arguments [_arguments_103] - -* **Request (object):** - - * **`connector_id` (string)**: The unique identifier of the connector to be deleted - * **`delete_sync_jobs` (Optional, boolean)**: A flag indicating if associated sync jobs should be also removed. Defaults to false. - * **`hard` (Optional, boolean)**: A flag indicating if the connector should be hard deleted. - - - -### get [_get_3] +#### Request (object) [_request_connector.delete] +- **`connector_id` (string)**: The unique identifier of the connector to be deleted +- **`delete_sync_jobs` (Optional, boolean)**: A flag indicating if associated sync jobs should be also removed. Defaults to false. +- **`hard` (Optional, boolean)**: A flag indicating if the connector should be hard deleted. +## client.connector.get [_connector.get] Get a connector. Get the details about a connector. -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-get) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-connector-get) ```ts client.connector.get({ connector_id }) ``` +### Arguments [_arguments_connector.get] -### Arguments [_arguments_104] - -* **Request (object):** - - * **`connector_id` (string)**: The unique identifier of the connector - * **`include_deleted` (Optional, boolean)**: A flag to indicate if the desired connector should be fetched, even if it was soft-deleted. - - - -### list [_list] +#### Request (object) [_request_connector.get] +- **`connector_id` (string)**: The unique identifier of the connector +- **`include_deleted` (Optional, boolean)**: A flag to indicate if the desired connector should be fetched, even if it was soft-deleted. +## client.connector.list [_connector.list] Get all connectors. Get information about all connectors. 
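For example, paging through connectors and filtering by service type might look like this (the filter value is illustrative):

```ts
const { count, results } = await client.connector.list({
  from: 0,
  size: 10,
  service_type: 'sharepoint_online' // illustrative service type filter
})
console.log(`${count} matching connectors`)
for (const connector of results) {
  console.log(connector.name, connector.index_name)
}
```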
-[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-list) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-connector-list) ```ts client.connector.list({ ... }) ``` +### Arguments [_arguments_connector.list] -### Arguments [_arguments_105] - -* **Request (object):** - - * **`from` (Optional, number)**: Starting offset (default: 0) - * **`size` (Optional, number)**: Specifies a max number of results to get - * **`index_name` (Optional, string | string[])**: A list of connector index names to fetch connector documents for - * **`connector_name` (Optional, string | string[])**: A list of connector names to fetch connector documents for - * **`service_type` (Optional, string | string[])**: A list of connector service types to fetch connector documents for - * **`include_deleted` (Optional, boolean)**: A flag to indicate if the desired connector should be fetched, even if it was soft-deleted. - * **`query` (Optional, string)**: A wildcard query string that filters connectors with matching name, description or index name - - - -### post [_post] +#### Request (object) [_request_connector.list] +- **`from` (Optional, number)**: Starting offset (default: 0) +- **`size` (Optional, number)**: Specifies a max number of results to get +- **`index_name` (Optional, string | string[])**: A list of connector index names to fetch connector documents for +- **`connector_name` (Optional, string | string[])**: A list of connector names to fetch connector documents for +- **`service_type` (Optional, string | string[])**: A list of connector service types to fetch connector documents for +- **`include_deleted` (Optional, boolean)**: A flag to indicate if the desired connector should be fetched, even if it was soft-deleted. +- **`query` (Optional, string)**: A wildcard query string that filters connectors with matching name, description or index name +## client.connector.post [_connector.post] Create a connector. -Connectors are Elasticsearch integrations that bring content from third-party data sources, which can be deployed on Elastic Cloud or hosted on your own infrastructure. Elastic managed connectors (Native connectors) are a managed service on Elastic Cloud. Self-managed connectors (Connector clients) are self-managed on your infrastructure. +Connectors are Elasticsearch integrations that bring content from third-party data sources, which can be deployed on Elastic Cloud or hosted on your own infrastructure. +Elastic managed connectors (Native connectors) are a managed service on Elastic Cloud. +Self-managed connectors (Connector clients) are self-managed on your infrastructure. -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-put) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-connector-put) ```ts client.connector.post({ ... 
}) ``` +### Arguments [_arguments_connector.post] -### Arguments [_arguments_106] - -* **Request (object):** - - * **`description` (Optional, string)** - * **`index_name` (Optional, string)** - * **`is_native` (Optional, boolean)** - * **`language` (Optional, string)** - * **`name` (Optional, string)** - * **`service_type` (Optional, string)** - - - -### put [_put] +#### Request (object) [_request_connector.post] +- **`description` (Optional, string)** +- **`index_name` (Optional, string)** +- **`is_native` (Optional, boolean)** +- **`language` (Optional, string)** +- **`name` (Optional, string)** +- **`service_type` (Optional, string)** +## client.connector.put [_connector.put] Create or update a connector. -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-put) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-connector-put) ```ts client.connector.put({ ... }) ``` +### Arguments [_arguments_connector.put] -### Arguments [_arguments_107] - -* **Request (object):** - - * **`connector_id` (Optional, string)**: The unique identifier of the connector to be created or updated. ID is auto-generated if not provided. - * **`description` (Optional, string)** - * **`index_name` (Optional, string)** - * **`is_native` (Optional, boolean)** - * **`language` (Optional, string)** - * **`name` (Optional, string)** - * **`service_type` (Optional, string)** - - - -### sync_job_cancel [_sync_job_cancel] +#### Request (object) [_request_connector.put] +- **`connector_id` (Optional, string)**: The unique identifier of the connector to be created or updated. ID is auto-generated if not provided. +- **`description` (Optional, string)** +- **`index_name` (Optional, string)** +- **`is_native` (Optional, boolean)** +- **`language` (Optional, string)** +- **`name` (Optional, string)** +- **`service_type` (Optional, string)** +## client.connector.syncJobCancel [_connector.sync_job_cancel] Cancel a connector sync job. -Cancel a connector sync job, which sets the status to cancelling and updates `cancellation_requested_at` to the current time. The connector service is then responsible for setting the status of connector sync jobs to cancelled. +Cancel a connector sync job, which sets the status to cancelling and updates `cancellation_requested_at` to the current time. +The connector service is then responsible for setting the status of connector sync jobs to cancelled. -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-sync-job-cancel) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-connector-sync-job-cancel) ```ts client.connector.syncJobCancel({ connector_sync_job_id }) ``` +### Arguments [_arguments_connector.sync_job_cancel] -### Arguments [_arguments_108] - -* **Request (object):** - - * **`connector_sync_job_id` (string)**: The unique identifier of the connector sync job - +#### Request (object) [_request_connector.sync_job_cancel] +- **`connector_sync_job_id` (string)**: The unique identifier of the connector sync job +## client.connector.syncJobCheckIn [_connector.sync_job_check_in] +Check in a connector sync job. +Check in a connector sync job and set the `last_seen` field to the current time before updating it in the internal index. -### sync_job_check_in [_sync_job_check_in] +To sync data using self-managed connectors, you need to deploy the Elastic connector service on your own infrastructure. 
+This service runs automatically on Elastic Cloud for Elastic managed connectors. -Check in a connector sync job. Check in a connector sync job and set the `last_seen` field to the current time before updating it in the internal index. - -To sync data using self-managed connectors, you need to deploy the Elastic connector service on your own infrastructure. This service runs automatically on Elastic Cloud for Elastic managed connectors. - -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-sync-job-check-in) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-connector-sync-job-check-in) ```ts client.connector.syncJobCheckIn({ connector_sync_job_id }) ``` +### Arguments [_arguments_connector.sync_job_check_in] -### Arguments [_arguments_109] - -* **Request (object):** - - * **`connector_sync_job_id` (string)**: The unique identifier of the connector sync job to be checked in. +#### Request (object) [_request_connector.sync_job_check_in] +- **`connector_sync_job_id` (string)**: The unique identifier of the connector sync job to be checked in. +## client.connector.syncJobClaim [_connector.sync_job_claim] +Claim a connector sync job. +This action updates the job status to `in_progress` and sets the `last_seen` and `started_at` timestamps to the current time. +Additionally, it can set the `sync_cursor` property for the sync job. +This API is not intended for direct connector management by users. +It supports the implementation of services that utilize the connector protocol to communicate with Elasticsearch. -### sync_job_claim [_sync_job_claim] +To sync data using self-managed connectors, you need to deploy the Elastic connector service on your own infrastructure. +This service runs automatically on Elastic Cloud for Elastic managed connectors. -Claim a connector sync job. This action updates the job status to `in_progress` and sets the `last_seen` and `started_at` timestamps to the current time. Additionally, it can set the `sync_cursor` property for the sync job. - -This API is not intended for direct connector management by users. It supports the implementation of services that utilize the connector protocol to communicate with Elasticsearch. - -To sync data using self-managed connectors, you need to deploy the Elastic connector service on your own infrastructure. This service runs automatically on Elastic Cloud for Elastic managed connectors. +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-connector-sync-job-claim) ```ts client.connector.syncJobClaim({ connector_sync_job_id, worker_hostname }) ``` +### Arguments [_arguments_connector.sync_job_claim] -### Arguments [_arguments_110] - -* **Request (object):** - - * **`connector_sync_job_id` (string)**: The unique identifier of the connector sync job. - * **`worker_hostname` (string)**: The host name of the current system that will run the job. - * **`sync_cursor` (Optional, User-defined value)**: The cursor object from the last incremental sync job. This should reference the `sync_cursor` field in the connector state for which the job runs. - - - -### sync_job_delete [_sync_job_delete] +#### Request (object) [_request_connector.sync_job_claim] +- **`connector_sync_job_id` (string)**: The unique identifier of the connector sync job. +- **`worker_hostname` (string)**: The host name of the current system that will run the job. 
+- **`sync_cursor` (Optional, User-defined value)**: The cursor object from the last incremental sync job. +This should reference the `sync_cursor` field in the connector state for which the job runs. +## client.connector.syncJobDelete [_connector.sync_job_delete] Delete a connector sync job. -Remove a connector sync job and its associated data. This is a destructive action that is not recoverable. +Remove a connector sync job and its associated data. +This is a destructive action that is not recoverable. -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-sync-job-delete) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-connector-sync-job-delete) ```ts client.connector.syncJobDelete({ connector_sync_job_id }) ``` +### Arguments [_arguments_connector.sync_job_delete] -### Arguments [_arguments_111] - -* **Request (object):** +#### Request (object) [_request_connector.sync_job_delete] +- **`connector_sync_job_id` (string)**: The unique identifier of the connector sync job to be deleted - * **`connector_sync_job_id` (string)**: The unique identifier of the connector sync job to be deleted +## client.connector.syncJobError [_connector.sync_job_error] +Set a connector sync job error. +Set the `error` field for a connector sync job and set its `status` to `error`. +To sync data using self-managed connectors, you need to deploy the Elastic connector service on your own infrastructure. +This service runs automatically on Elastic Cloud for Elastic managed connectors. - -### sync_job_error [_sync_job_error] - -Set a connector sync job error. Set the `error` field for a connector sync job and set its `status` to `error`. - -To sync data using self-managed connectors, you need to deploy the Elastic connector service on your own infrastructure. This service runs automatically on Elastic Cloud for Elastic managed connectors. - -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-sync-job-error) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-connector-sync-job-error) ```ts client.connector.syncJobError({ connector_sync_job_id, error }) ``` +### Arguments [_arguments_connector.sync_job_error] -### Arguments [_arguments_112] - -* **Request (object):** - - * **`connector_sync_job_id` (string)**: The unique identifier for the connector sync job. - * **`error` (string)**: The error for the connector sync job error field. - - - -### sync_job_get [_sync_job_get] +#### Request (object) [_request_connector.sync_job_error] +- **`connector_sync_job_id` (string)**: The unique identifier for the connector sync job. +- **`error` (string)**: The error for the connector sync job error field. +## client.connector.syncJobGet [_connector.sync_job_get] Get a connector sync job. 
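A sketch of fetching a sync job and reading a couple of its fields (the job ID is a placeholder):

```ts
const job = await client.connector.syncJobGet({
  connector_sync_job_id: 'my-sync-job-id'
})
console.log(job.status, job.indexed_document_count)
```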
-[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-sync-job-get) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-connector-sync-job-get) ```ts client.connector.syncJobGet({ connector_sync_job_id }) ``` +### Arguments [_arguments_connector.sync_job_get] -### Arguments [_arguments_113] - -* **Request (object):** - - * **`connector_sync_job_id` (string)**: The unique identifier of the connector sync job - - - -### sync_job_list [_sync_job_list] +#### Request (object) [_request_connector.sync_job_get] +- **`connector_sync_job_id` (string)**: The unique identifier of the connector sync job +## client.connector.syncJobList [_connector.sync_job_list] Get all connector sync jobs. Get information about all stored connector sync jobs listed by their creation date in ascending order. -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-sync-job-list) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-connector-sync-job-list) ```ts client.connector.syncJobList({ ... }) ``` +### Arguments [_arguments_connector.sync_job_list] -### Arguments [_arguments_114] - -* **Request (object):** - - * **`from` (Optional, number)**: Starting offset (default: 0) - * **`size` (Optional, number)**: Specifies a max number of results to get - * **`status` (Optional, Enum("canceling" | "canceled" | "completed" | "error" | "in_progress" | "pending" | "suspended"))**: A sync job status to fetch connector sync jobs for - * **`connector_id` (Optional, string)**: A connector id to fetch connector sync jobs for - * **`job_type` (Optional, Enum("full" | "incremental" | "access_control") | Enum("full" | "incremental" | "access_control")[])**: A list of job types to fetch the sync jobs for - - - -### sync_job_post [_sync_job_post] +#### Request (object) [_request_connector.sync_job_list] +- **`from` (Optional, number)**: Starting offset (default: 0) +- **`size` (Optional, number)**: Specifies a max number of results to get +- **`status` (Optional, Enum("canceling" | "canceled" | "completed" | "error" | "in_progress" | "pending" | "suspended"))**: A sync job status to fetch connector sync jobs for +- **`connector_id` (Optional, string)**: A connector id to fetch connector sync jobs for +- **`job_type` (Optional, Enum("full" | "incremental" | "access_control") | Enum("full" | "incremental" | "access_control")[])**: A list of job types to fetch the sync jobs for +## client.connector.syncJobPost [_connector.sync_job_post] Create a connector sync job. Create a connector sync job document in the internal index and initialize its counters and timestamps with default values. 
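As a sketch, creating an on-demand full sync job for an existing connector (the connector ID is a placeholder) might look like this:

```ts
// The response contains the ID of the newly created sync job document.
const { id } = await client.connector.syncJobPost({
  id: 'my-connector',
  job_type: 'full',
  trigger_method: 'on_demand'
})
console.log(`created sync job ${id}`)
```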
-[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-sync-job-post) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-connector-sync-job-post) ```ts client.connector.syncJobPost({ id }) ``` +### Arguments [_arguments_connector.sync_job_post] -### Arguments [_arguments_115] - -* **Request (object):** - - * **`id` (string)**: The id of the associated connector - * **`job_type` (Optional, Enum("full" | "incremental" | "access_control"))** - * **`trigger_method` (Optional, Enum("on_demand" | "scheduled"))** - - - -### sync_job_update_stats [_sync_job_update_stats] +#### Request (object) [_request_connector.sync_job_post] +- **`id` (string)**: The id of the associated connector +- **`job_type` (Optional, Enum("full" | "incremental" | "access_control"))** +- **`trigger_method` (Optional, Enum("on_demand" | "scheduled"))** -Set the connector sync job stats. Stats include: `deleted_document_count`, `indexed_document_count`, `indexed_document_volume`, and `total_document_count`. You can also update `last_seen`. This API is mainly used by the connector service for updating sync job information. +## client.connector.syncJobUpdateStats [_connector.sync_job_update_stats] +Set the connector sync job stats. +Stats include: `deleted_document_count`, `indexed_document_count`, `indexed_document_volume`, and `total_document_count`. +You can also update `last_seen`. +This API is mainly used by the connector service for updating sync job information. -To sync data using self-managed connectors, you need to deploy the Elastic connector service on your own infrastructure. This service runs automatically on Elastic Cloud for Elastic managed connectors. +To sync data using self-managed connectors, you need to deploy the Elastic connector service on your own infrastructure. +This service runs automatically on Elastic Cloud for Elastic managed connectors. -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-sync-job-update-stats) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-connector-sync-job-update-stats) ```ts client.connector.syncJobUpdateStats({ connector_sync_job_id, deleted_document_count, indexed_document_count, indexed_document_volume }) ``` +### Arguments [_arguments_connector.sync_job_update_stats] -### Arguments [_arguments_116] - -* **Request (object):** - - * **`connector_sync_job_id` (string)**: The unique identifier of the connector sync job. - * **`deleted_document_count` (number)**: The number of documents the sync job deleted. - * **`indexed_document_count` (number)**: The number of documents the sync job indexed. - * **`indexed_document_volume` (number)**: The total size of the data (in MiB) the sync job indexed. - * **`last_seen` (Optional, string | -1 | 0)**: The timestamp to use in the `last_seen` property for the connector sync job. - * **`metadata` (Optional, Record)**: The connector-specific metadata. - * **`total_document_count` (Optional, number)**: The total number of documents in the target index after the sync job finished. - - - -### update_active_filtering [_update_active_filtering] +#### Request (object) [_request_connector.sync_job_update_stats] +- **`connector_sync_job_id` (string)**: The unique identifier of the connector sync job. +- **`deleted_document_count` (number)**: The number of documents the sync job deleted. 
+- **`indexed_document_count` (number)**: The number of documents the sync job indexed. +- **`indexed_document_volume` (number)**: The total size of the data (in MiB) the sync job indexed. +- **`last_seen` (Optional, string | -1 | 0)**: The timestamp to use in the `last_seen` property for the connector sync job. +- **`metadata` (Optional, Record)**: The connector-specific metadata. +- **`total_document_count` (Optional, number)**: The total number of documents in the target index after the sync job finished. +## client.connector.updateActiveFiltering [_connector.update_active_filtering] Activate the connector draft filter. Activates the valid draft filtering for a connector. -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-filtering) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-connector-update-filtering) ```ts client.connector.updateActiveFiltering({ connector_id }) ``` +### Arguments [_arguments_connector.update_active_filtering] -### Arguments [_arguments_117] - -* **Request (object):** - - * **`connector_id` (string)**: The unique identifier of the connector to be updated - - - -### update_api_key_id [_update_api_key_id] +#### Request (object) [_request_connector.update_active_filtering] +- **`connector_id` (string)**: The unique identifier of the connector to be updated +## client.connector.updateApiKeyId [_connector.update_api_key_id] Update the connector API key ID. -Update the `api_key_id` and `api_key_secret_id` fields of a connector. You can specify the ID of the API key used for authorization and the ID of the connector secret where the API key is stored. The connector secret ID is required only for Elastic managed (native) connectors. Self-managed connectors (connector clients) do not use this field. +Update the `api_key_id` and `api_key_secret_id` fields of a connector. +You can specify the ID of the API key used for authorization and the ID of the connector secret where the API key is stored. +The connector secret ID is required only for Elastic managed (native) connectors. +Self-managed connectors (connector clients) do not use this field. -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-api-key-id) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-connector-update-api-key-id) ```ts client.connector.updateApiKeyId({ connector_id }) ``` +### Arguments [_arguments_connector.update_api_key_id] -### Arguments [_arguments_118] - -* **Request (object):** - - * **`connector_id` (string)**: The unique identifier of the connector to be updated - * **`api_key_id` (Optional, string)** - * **`api_key_secret_id` (Optional, string)** - - - -### update_configuration [_update_configuration] +#### Request (object) [_request_connector.update_api_key_id] +- **`connector_id` (string)**: The unique identifier of the connector to be updated +- **`api_key_id` (Optional, string)** +- **`api_key_secret_id` (Optional, string)** +## client.connector.updateConfiguration [_connector.update_configuration] Update the connector configuration. Update the configuration field in the connector document. 
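A sketch of updating configuration values (the keys depend on the connector's service type; these are illustrative):

```ts
await client.connector.updateConfiguration({
  connector_id: 'my-connector',
  values: {
    host: 'db.example.com', // illustrative configuration keys and values
    port: 5432
  }
})
```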
-[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-configuration) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-connector-update-configuration) ```ts client.connector.updateConfiguration({ connector_id }) ``` +### Arguments [_arguments_connector.update_configuration] -### Arguments [_arguments_119] - -* **Request (object):** - - * **`connector_id` (string)**: The unique identifier of the connector to be updated - * **`configuration` (Optional, Record)** - * **`values` (Optional, Record)** - - - -### update_error [_update_error] +#### Request (object) [_request_connector.update_configuration] +- **`connector_id` (string)**: The unique identifier of the connector to be updated +- **`configuration` (Optional, Record)** +- **`values` (Optional, Record)** +## client.connector.updateError [_connector.update_error] Update the connector error field. -Set the error field for the connector. If the error provided in the request body is non-null, the connector’s status is updated to error. Otherwise, if the error is reset to null, the connector status is updated to connected. +Set the error field for the connector. +If the error provided in the request body is non-null, the connector’s status is updated to error. +Otherwise, if the error is reset to null, the connector status is updated to connected. -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-error) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-connector-update-error) ```ts client.connector.updateError({ connector_id, error }) ``` +### Arguments [_arguments_connector.update_error] -### Arguments [_arguments_120] - -* **Request (object):** - - * **`connector_id` (string)**: The unique identifier of the connector to be updated - * **`error` (T | null)** - - +#### Request (object) [_request_connector.update_error] +- **`connector_id` (string)**: The unique identifier of the connector to be updated +- **`error` (T | null)** -### update_features [_update_features] - -Update the connector features. Update the connector features in the connector document. This API can be used to control the following aspects of a connector: +## client.connector.updateFeatures [_connector.update_features] +Update the connector features. +Update the connector features in the connector document. +This API can be used to control the following aspects of a connector: * document-level security * incremental syncs * advanced sync rules * basic sync rules -Normally, the running connector service automatically manages these features. However, you can use this API to override the default behavior. +Normally, the running connector service automatically manages these features. +However, you can use this API to override the default behavior. -To sync data using self-managed connectors, you need to deploy the Elastic connector service on your own infrastructure. This service runs automatically on Elastic Cloud for Elastic managed connectors. +To sync data using self-managed connectors, you need to deploy the Elastic connector service on your own infrastructure. +This service runs automatically on Elastic Cloud for Elastic managed connectors. 
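As a sketch, assuming a configured `client`, the request below toggles document-level security and incremental syncs for a placeholder connector ID; the nested `{ enabled: ... }` shape is an assumption about the `features` object described in the arguments below:

```ts
// Illustrative only: the connector ID is a placeholder and the nested
// `{ enabled: boolean }` shape for each feature is an assumption.
await client.connector.updateFeatures({
  connector_id: 'my-connector',
  features: {
    document_level_security: { enabled: true },
    incremental_sync: { enabled: true }
  }
})
```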
-[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-features) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-connector-update-features) ```ts client.connector.updateFeatures({ connector_id, features }) ``` +### Arguments [_arguments_connector.update_features] -### Arguments [_arguments_121] - -* **Request (object):** - - * **`connector_id` (string)**: The unique identifier of the connector to be updated. - * **`features` ({ document_level_security, incremental_sync, native_connector_api_keys, sync_rules })** - - - -### update_filtering [_update_filtering] +#### Request (object) [_request_connector.update_features] +- **`connector_id` (string)**: The unique identifier of the connector to be updated. +- **`features` ({ document_level_security, incremental_sync, native_connector_api_keys, sync_rules })** +## client.connector.updateFiltering [_connector.update_filtering] Update the connector filtering. -Update the draft filtering configuration of a connector and marks the draft validation state as edited. The filtering draft is activated once validated by the running Elastic connector service. The filtering property is used to configure sync rules (both basic and advanced) for a connector. +Update the draft filtering configuration of a connector and marks the draft validation state as edited. +The filtering draft is activated once validated by the running Elastic connector service. +The filtering property is used to configure sync rules (both basic and advanced) for a connector. -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-filtering) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-connector-update-filtering) ```ts client.connector.updateFiltering({ connector_id }) ``` +### Arguments [_arguments_connector.update_filtering] -### Arguments [_arguments_122] +#### Request (object) [_request_connector.update_filtering] +- **`connector_id` (string)**: The unique identifier of the connector to be updated +- **`filtering` (Optional, { active, domain, draft }[])** +- **`rules` (Optional, { created_at, field, id, order, policy, rule, updated_at, value }[])** +- **`advanced_snippet` (Optional, { created_at, updated_at, value })** -* **Request (object):** +## client.connector.updateFilteringValidation [_connector.update_filtering_validation] +Update the connector draft filtering validation. - * **`connector_id` (string)**: The unique identifier of the connector to be updated - * **`filtering` (Optional, { active, domain, draft }[])** - * **`rules` (Optional, { created_at, field, id, order, policy, rule, updated_at, value }[])** - * **`advanced_snippet` (Optional, { created_at, updated_at, value })** +Update the draft filtering validation info for a connector. - - -### update_filtering_validation [_update_filtering_validation] - -Update the connector draft filtering validation. - -Update the draft filtering validation info for a connector. 
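For illustration, a hedged sketch of reporting a successful validation, assuming a configured `client`; the connector ID is a placeholder and the `state` value shown is an assumption about the accepted enum values:

```ts
await client.connector.updateFilteringValidation({
  connector_id: 'my-connector', // placeholder ID
  validation: {
    state: 'valid', // assumed enum value
    errors: []      // no validation errors in this sketch
  }
})
```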
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-connector-update-filtering-validation) ```ts client.connector.updateFilteringValidation({ connector_id, validation }) ``` +### Arguments [_arguments_connector.update_filtering_validation] -### Arguments [_arguments_123] - -* **Request (object):** - - * **`connector_id` (string)**: The unique identifier of the connector to be updated - * **`validation` ({ errors, state })** - - - -### update_index_name [_update_index_name] +#### Request (object) [_request_connector.update_filtering_validation] +- **`connector_id` (string)**: The unique identifier of the connector to be updated +- **`validation` ({ errors, state })** +## client.connector.updateIndexName [_connector.update_index_name] Update the connector index name. Update the `index_name` field of a connector, specifying the index where the data ingested by the connector is stored. -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-index-name) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-connector-update-index-name) ```ts client.connector.updateIndexName({ connector_id, index_name }) ``` +### Arguments [_arguments_connector.update_index_name] -### Arguments [_arguments_124] - -* **Request (object):** - - * **`connector_id` (string)**: The unique identifier of the connector to be updated - * **`index_name` (T | null)** - - - -### update_name [_update_name] +#### Request (object) [_request_connector.update_index_name] +- **`connector_id` (string)**: The unique identifier of the connector to be updated +- **`index_name` (T | null)** +## client.connector.updateName [_connector.update_name] Update the connector name and description. -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-name) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-connector-update-name) ```ts client.connector.updateName({ connector_id }) ``` +### Arguments [_arguments_connector.update_name] -### Arguments [_arguments_125] - -* **Request (object):** - - * **`connector_id` (string)**: The unique identifier of the connector to be updated - * **`name` (Optional, string)** - * **`description` (Optional, string)** - - - -### update_native [_update_native] +#### Request (object) [_request_connector.update_name] +- **`connector_id` (string)**: The unique identifier of the connector to be updated +- **`name` (Optional, string)** +- **`description` (Optional, string)** +## client.connector.updateNative [_connector.update_native] Update the connector is_native flag. +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-connector-update-native) + ```ts client.connector.updateNative({ connector_id, is_native }) ``` +### Arguments [_arguments_connector.update_native] -### Arguments [_arguments_126] - -* **Request (object):** - - * **`connector_id` (string)**: The unique identifier of the connector to be updated - * **`is_native` (boolean)** - - - -### update_pipeline [_update_pipeline] +#### Request (object) [_request_connector.update_native] +- **`connector_id` (string)**: The unique identifier of the connector to be updated +- **`is_native` (boolean)** +## client.connector.updatePipeline [_connector.update_pipeline] Update the connector pipeline. 
When you create a new connector, the configuration of an ingest pipeline is populated with default settings. -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-pipeline) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-connector-update-pipeline) ```ts client.connector.updatePipeline({ connector_id, pipeline }) ``` +### Arguments [_arguments_connector.update_pipeline] -### Arguments [_arguments_127] - -* **Request (object):** - - * **`connector_id` (string)**: The unique identifier of the connector to be updated - * **`pipeline` ({ extract_binary_content, name, reduce_whitespace, run_ml_inference })** - - - -### update_scheduling [_update_scheduling] +#### Request (object) [_request_connector.update_pipeline] +- **`connector_id` (string)**: The unique identifier of the connector to be updated +- **`pipeline` ({ extract_binary_content, name, reduce_whitespace, run_ml_inference })** +## client.connector.updateScheduling [_connector.update_scheduling] Update the connector scheduling. -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-scheduling) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-connector-update-scheduling) ```ts client.connector.updateScheduling({ connector_id, scheduling }) ``` +### Arguments [_arguments_connector.update_scheduling] -### Arguments [_arguments_128] - -* **Request (object):** - - * **`connector_id` (string)**: The unique identifier of the connector to be updated - * **`scheduling` ({ access_control, full, incremental })** - - - -### update_service_type [_update_service_type] +#### Request (object) [_request_connector.update_scheduling] +- **`connector_id` (string)**: The unique identifier of the connector to be updated +- **`scheduling` ({ access_control, full, incremental })** +## client.connector.updateServiceType [_connector.update_service_type] Update the connector service type. -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-service-type) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-connector-update-service-type) ```ts client.connector.updateServiceType({ connector_id, service_type }) ``` +### Arguments [_arguments_connector.update_service_type] -### Arguments [_arguments_129] - -* **Request (object):** - - * **`connector_id` (string)**: The unique identifier of the connector to be updated - * **`service_type` (string)** - - - -### update_status [_update_status] +#### Request (object) [_request_connector.update_service_type] +- **`connector_id` (string)**: The unique identifier of the connector to be updated +- **`service_type` (string)** +## client.connector.updateStatus [_connector.update_status] Update the connector status. 
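For example, a minimal call that moves a connector to the `configured` status, assuming a configured `client`; the connector ID is a placeholder and the allowed status values are listed in the arguments below:

```ts
await client.connector.updateStatus({
  connector_id: 'my-connector', // placeholder ID
  status: 'configured'
})
```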
-[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-status)
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-connector-update-status)

```ts
client.connector.updateStatus({ connector_id, status })
```

+### Arguments [_arguments_connector.update_status]

-### Arguments [_arguments_130]

+#### Request (object) [_request_connector.update_status]
+- **`connector_id` (string)**: The unique identifier of the connector to be updated
+- **`status` (Enum("created" | "needs_configuration" | "configured" | "connected" | "error"))**

-* **Request (object):**
-
- * **`connector_id` (string)**: The unique identifier of the connector to be updated
- * **`status` (Enum("created" | "needs_configuration" | "configured" | "connected" | "error"))**

+## client.danglingIndices.deleteDanglingIndex [_dangling_indices.delete_dangling_index]
+Delete a dangling index.
+If Elasticsearch encounters index data that is absent from the current cluster state, those indices are considered to be dangling.
+For example, this can happen if you delete more than `cluster.indices.tombstones.size` indices while an Elasticsearch node is offline.

+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-dangling-indices-delete-dangling-index)

+```ts
+client.danglingIndices.deleteDanglingIndex({ index_uuid, accept_data_loss })
+```

-## dangling_indices [_dangling_indices]
+### Arguments [_arguments_dangling_indices.delete_dangling_index]

+#### Request (object) [_request_dangling_indices.delete_dangling_index]
+- **`index_uuid` (string)**: The UUID of the index to delete. Use the get dangling indices API to find the UUID.
+- **`accept_data_loss` (boolean)**: This parameter must be set to true to acknowledge that it will no longer be possible to recover data from the dangling index.
+- **`master_timeout` (Optional, string | -1 | 0)**: Specify timeout for connection to master
+- **`timeout` (Optional, string | -1 | 0)**: Explicit operation timeout

-### delete_dangling_index [_delete_dangling_index]
+## client.danglingIndices.importDanglingIndex [_dangling_indices.import_dangling_index]
+Import a dangling index.

-Delete a dangling index. If Elasticsearch encounters index data that is absent from the current cluster state, those indices are considered to be dangling. For example, this can happen if you delete more than `cluster.indices.tombstones.size` indices while an Elasticsearch node is offline.
+If Elasticsearch encounters index data that is absent from the current cluster state, those indices are considered to be dangling.
+For example, this can happen if you delete more than `cluster.indices.tombstones.size` indices while an Elasticsearch node is offline.

-[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-dangling-indices-delete-dangling-index)
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-dangling-indices-import-dangling-index)

```ts
-client.danglingIndices.deleteDanglingIndex({ index_uuid, accept_data_loss })
+client.danglingIndices.importDanglingIndex({ index_uuid, accept_data_loss })
```

+### Arguments [_arguments_dangling_indices.import_dangling_index]

-### Arguments [_arguments_131]

+#### Request (object) [_request_dangling_indices.import_dangling_index]
+- **`index_uuid` (string)**: The UUID of the index to import. Use the get dangling indices API to locate the UUID.
+- **`accept_data_loss` (boolean)**: This parameter must be set to true to import a dangling index. +Because Elasticsearch cannot know where the dangling index data came from or determine which shard copies are fresh and which are stale, it cannot guarantee that the imported data represents the latest state of the index when it was last in the cluster. +- **`master_timeout` (Optional, string | -1 | 0)**: Specify timeout for connection to master +- **`timeout` (Optional, string | -1 | 0)**: Explicit operation timeout -* **Request (object):** +## client.danglingIndices.listDanglingIndices [_dangling_indices.list_dangling_indices] +Get the dangling indices. - * **`index_uuid` (string)**: The UUID of the index to delete. Use the get dangling indices API to find the UUID. - * **`accept_data_loss` (boolean)**: This parameter must be set to true to acknowledge that it will no longer be possible to recove data from the dangling index. - * **`master_timeout` (Optional, string | -1 | 0)**: Specify timeout for connection to master - * **`timeout` (Optional, string | -1 | 0)**: Explicit operation timeout +If Elasticsearch encounters index data that is absent from the current cluster state, those indices are considered to be dangling. +For example, this can happen if you delete more than `cluster.indices.tombstones.size` indices while an Elasticsearch node is offline. +Use this API to list dangling indices, which you can then import or delete. +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-dangling-indices-list-dangling-indices) -### import_dangling_index [_import_dangling_index] +```ts +client.danglingIndices.listDanglingIndices() +``` -Import a dangling index. -If Elasticsearch encounters index data that is absent from the current cluster state, those indices are considered to be dangling. For example, this can happen if you delete more than `cluster.indices.tombstones.size` indices while an Elasticsearch node is offline. +## client.enrich.deletePolicy [_enrich.delete_policy] +Delete an enrich policy. +Deletes an existing enrich policy and its enrich index. -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-dangling-indices-import-dangling-index) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-enrich-delete-policy) ```ts -client.danglingIndices.importDanglingIndex({ index_uuid, accept_data_loss }) +client.enrich.deletePolicy({ name }) ``` +### Arguments [_arguments_enrich.delete_policy] -### Arguments [_arguments_132] - -* **Request (object):** +#### Request (object) [_request_enrich.delete_policy] +- **`name` (string)**: Enrich policy to delete. +- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. - * **`index_uuid` (string)**: The UUID of the index to import. Use the get dangling indices API to locate the UUID. - * **`accept_data_loss` (boolean)**: This parameter must be set to true to import a dangling index. Because Elasticsearch cannot know where the dangling index data came from or determine which shard copies are fresh and which are stale, it cannot guarantee that the imported data represents the latest state of the index when it was last in the cluster. 
- * **`master_timeout` (Optional, string | -1 | 0)**: Specify timeout for connection to master - * **`timeout` (Optional, string | -1 | 0)**: Explicit operation timeout +## client.enrich.executePolicy [_enrich.execute_policy] +Run an enrich policy. +Create the enrich index for an existing enrich policy. +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-enrich-execute-policy) +```ts +client.enrich.executePolicy({ name }) +``` -### list_dangling_indices [_list_dangling_indices] - -Get the dangling indices. +### Arguments [_arguments_enrich.execute_policy] -If Elasticsearch encounters index data that is absent from the current cluster state, those indices are considered to be dangling. For example, this can happen if you delete more than `cluster.indices.tombstones.size` indices while an Elasticsearch node is offline. +#### Request (object) [_request_enrich.execute_policy] +- **`name` (string)**: Enrich policy to execute. +- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. +- **`wait_for_completion` (Optional, boolean)**: If `true`, the request blocks other enrich policy execution requests until complete. -Use this API to list dangling indices, which you can then import or delete. +## client.enrich.getPolicy [_enrich.get_policy] +Get an enrich policy. +Returns information about an enrich policy. -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-dangling-indices-list-dangling-indices) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-enrich-get-policy) ```ts -client.danglingIndices.listDanglingIndices() +client.enrich.getPolicy({ ... }) ``` +### Arguments [_arguments_enrich.get_policy] + +#### Request (object) [_request_enrich.get_policy] +- **`name` (Optional, string | string[])**: List of enrich policy names used to limit the request. +To return information for all enrich policies, omit this parameter. +- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. + +## client.enrich.putPolicy [_enrich.put_policy] +Create an enrich policy. +Creates an enrich policy. -## enrich [_enrich] +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-enrich-put-policy) + +```ts +client.enrich.putPolicy({ name }) +``` +### Arguments [_arguments_enrich.put_policy] -### delete_policy [_delete_policy] +#### Request (object) [_request_enrich.put_policy] +- **`name` (string)**: Name of the enrich policy to create or update. +- **`geo_match` (Optional, { enrich_fields, indices, match_field, query, name, elasticsearch_version })**: Matches enrich data to incoming documents based on a `geo_shape` query. +- **`match` (Optional, { enrich_fields, indices, match_field, query, name, elasticsearch_version })**: Matches enrich data to incoming documents based on a `term` query. +- **`range` (Optional, { enrich_fields, indices, match_field, query, name, elasticsearch_version })**: Matches a number, date, or IP address in incoming documents to a range in the enrich index based on a `term` query. +- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. -Delete an enrich policy. Deletes an existing enrich policy and its enrich index. +## client.enrich.stats [_enrich.stats] +Get enrich stats. +Returns enrich coordinator statistics and information about enrich policies that are currently executing. 
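As a small sketch, assuming a configured `client`; the response field names used below (`executing_policies`, `coordinator_stats`) are assumptions about the enrich stats response shape:

```ts
// Sketch: logs how many policies are currently executing and the
// per-coordinator statistics (field names are assumptions).
const stats = await client.enrich.stats()
console.log(`${stats.executing_policies.length} enrich policies currently executing`)
console.log(stats.coordinator_stats)
```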
-[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-enrich-delete-policy) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-enrich-stats) ```ts -client.enrich.deletePolicy({ name }) +client.enrich.stats({ ... }) ``` +### Arguments [_arguments_enrich.stats] -### Arguments [_arguments_133] +#### Request (object) [_request_enrich.stats] +- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. -* **Request (object):** +## client.eql.delete [_eql.delete] +Delete an async EQL search. +Delete an async EQL search or a stored synchronous EQL search. +The API also deletes results for the search. - * **`name` (string)**: Enrich policy to delete. - * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-eql-delete) +```ts +client.eql.delete({ id }) +``` +### Arguments [_arguments_eql.delete] -### execute_policy [_execute_policy] +#### Request (object) [_request_eql.delete] +- **`id` (string)**: Identifier for the search to delete. +A search ID is provided in the EQL search API's response for an async search. +A search ID is also provided if the request’s `keep_on_completion` parameter is `true`. -Run an enrich policy. Create the enrich index for an existing enrich policy. +## client.eql.get [_eql.get] +Get async EQL search results. +Get the current status and available results for an async EQL search or a stored synchronous EQL search. -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-enrich-execute-policy) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-eql-get) ```ts -client.enrich.executePolicy({ name }) +client.eql.get({ id }) ``` +### Arguments [_arguments_eql.get] -### Arguments [_arguments_134] +#### Request (object) [_request_eql.get] +- **`id` (string)**: Identifier for the search. +- **`keep_alive` (Optional, string | -1 | 0)**: Period for which the search and its results are stored on the cluster. +Defaults to the keep_alive value set by the search’s EQL search API request. +- **`wait_for_completion_timeout` (Optional, string | -1 | 0)**: Timeout duration to wait for the request to finish. +Defaults to no timeout, meaning the request waits for complete search results. -* **Request (object):** +## client.eql.getStatus [_eql.get_status] +Get the async EQL status. +Get the current status for an async EQL search or a stored synchronous EQL search without returning results. - * **`name` (string)**: Enrich policy to execute. - * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. - * **`wait_for_completion` (Optional, boolean)**: If `true`, the request blocks other enrich policy execution requests until complete. +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-eql-get-status) +```ts +client.eql.getStatus({ id }) +``` +### Arguments [_arguments_eql.get_status] -### get_policy [_get_policy] +#### Request (object) [_request_eql.get_status] +- **`id` (string)**: Identifier for the search. -Get an enrich policy. Returns information about an enrich policy. +## client.eql.search [_eql.search] +Get EQL search results. +Returns search results for an Event Query Language (EQL) query. 
+EQL assumes each document in a data stream or index corresponds to an event. -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-enrich-get-policy) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-eql-search) ```ts -client.enrich.getPolicy({ ... }) +client.eql.search({ index, query }) ``` +### Arguments [_arguments_eql.search] + +#### Request (object) [_request_eql.search] +- **`index` (string | string[])**: The name of the index to scope the operation +- **`query` (string)**: EQL query you wish to run. +- **`case_sensitive` (Optional, boolean)** +- **`event_category_field` (Optional, string)**: Field containing the event classification, such as process, file, or network. +- **`tiebreaker_field` (Optional, string)**: Field used to sort hits with the same timestamp in ascending order +- **`timestamp_field` (Optional, string)**: Field containing event timestamp. Default "@timestamp" +- **`fetch_size` (Optional, number)**: Maximum number of events to search at a time for sequence queries. +- **`filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type } | { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type }[])**: Query, written in Query DSL, used to filter the events on which the EQL query runs. +- **`keep_alive` (Optional, string | -1 | 0)** +- **`keep_on_completion` (Optional, boolean)** +- **`wait_for_completion_timeout` (Optional, string | -1 | 0)** +- **`allow_partial_search_results` (Optional, boolean)**: Allow query execution also in case of shard failures. +If true, the query will keep running and will return results based on the available shards. +For sequences, the behavior can be further refined using allow_partial_sequence_results +- **`allow_partial_sequence_results` (Optional, boolean)**: This flag applies only to sequences and has effect only if allow_partial_search_results=true. +If true, the sequence query will return results based on the available shards, ignoring the others. +If false, the sequence query will return successfully, but will always have empty results. +- **`size` (Optional, number)**: For basic queries, the maximum number of matching events to return. 
Defaults to 10 +- **`fields` (Optional, { field, format, include_unmapped } | { field, format, include_unmapped }[])**: Array of wildcard (*) patterns. The response returns values for field names matching these patterns in the fields property of each hit. +- **`result_position` (Optional, Enum("tail" | "head"))** +- **`runtime_mappings` (Optional, Record)** +- **`max_samples_per_key` (Optional, number)**: By default, the response of a sample query contains up to `10` samples, with one sample per unique set of join keys. Use the `size` +parameter to get a smaller or larger set of samples. To retrieve more than one sample per set of join keys, use the +`max_samples_per_key` parameter. Pipes are not supported for sample queries. +- **`allow_no_indices` (Optional, boolean)** +- **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])** +- **`ignore_unavailable` (Optional, boolean)**: If true, missing or closed indices are not included in the response. + +## client.esql.asyncQuery [_esql.async_query] +Run an async ES|QL query. +Asynchronously run an ES|QL (Elasticsearch query language) query, monitor its progress, and retrieve results when they become available. -### Arguments [_arguments_135] - -* **Request (object):** +The API accepts the same parameters and request body as the synchronous query API, along with additional async related properties. - * **`name` (Optional, string | string[])**: List of enrich policy names used to limit the request. To return information for all enrich policies, omit this parameter. - * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-esql-async-query) +```ts +client.esql.asyncQuery({ query }) +``` +### Arguments [_arguments_esql.async_query] + +#### Request (object) [_request_esql.async_query] +- **`query` (string)**: The ES|QL query API accepts an ES|QL query string in the query parameter, runs it, and returns the results. +- **`columnar` (Optional, boolean)**: By default, ES|QL returns results as rows. For example, FROM returns each individual document as one row. For the JSON, YAML, CBOR and smile formats, ES|QL can return the results in a columnar fashion where one row represents all the values of a certain column in the results. +- **`filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })**: Specify a Query DSL query in the filter parameter to filter the set of documents that an ES|QL query runs on. +- **`locale` (Optional, string)** +- **`params` (Optional, number | number | string | boolean | null[])**: To avoid any attempts of hacking or code injection, extract the values in a separate list of parameters. Use question mark placeholders (?) 
in the query string for each of the parameters. +- **`profile` (Optional, boolean)**: If provided and `true` the response will include an extra `profile` object +with information on how the query was executed. This information is for human debugging +and its format can change at any time but it can give some insight into the performance +of each part of the query. +- **`tables` (Optional, Record>)**: Tables to use with the LOOKUP operation. The top level key is the table +name and the next level key is the column name. +- **`include_ccs_metadata` (Optional, boolean)**: When set to `true` and performing a cross-cluster query, the response will include an extra `_clusters` +object with information about the clusters that participated in the search along with info such as shards +count. +- **`wait_for_completion_timeout` (Optional, string | -1 | 0)**: The period to wait for the request to finish. +By default, the request waits for 1 second for the query results. +If the query completes during this period, results are returned +Otherwise, a query ID is returned that can later be used to retrieve the results. +- **`delimiter` (Optional, string)**: The character to use between values within a CSV row. +It is valid only for the CSV format. +- **`drop_null_columns` (Optional, boolean)**: Indicates whether columns that are entirely `null` will be removed from the `columns` and `values` portion of the results. +If `true`, the response will include an extra section under the name `all_columns` which has the name of all the columns. +- **`format` (Optional, Enum("csv" | "json" | "tsv" | "txt" | "yaml" | "cbor" | "smile" | "arrow"))**: A short version of the Accept header, for example `json` or `yaml`. +- **`keep_alive` (Optional, string | -1 | 0)**: The period for which the query and its results are stored in the cluster. +The default period is five days. +When this period expires, the query and its results are deleted, even if the query is still ongoing. +If the `keep_on_completion` parameter is false, Elasticsearch only stores async queries that do not complete within the period set by the `wait_for_completion_timeout` parameter, regardless of this value. +- **`keep_on_completion` (Optional, boolean)**: Indicates whether the query and its results are stored in the cluster. +If false, the query and its results are stored in the cluster only if the request does not complete during the period set by the `wait_for_completion_timeout` parameter. + +## client.esql.asyncQueryDelete [_esql.async_query_delete] +Delete an async ES|QL query. +If the query is still running, it is cancelled. +Otherwise, the stored results are deleted. -### put_policy [_put_policy] +If the Elasticsearch security features are enabled, only the following users can use this API to delete a query: -Create an enrich policy. Creates an enrich policy. +* The authenticated user that submitted the original query request +* Users with the `cancel_task` cluster privilege -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-enrich-put-policy) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-esql-async-query-delete) ```ts -client.enrich.putPolicy({ name }) +client.esql.asyncQueryDelete({ id }) ``` +### Arguments [_arguments_esql.async_query_delete] -### Arguments [_arguments_136] +#### Request (object) [_request_esql.async_query_delete] +- **`id` (string)**: The unique identifier of the query. 
+A query ID is provided in the ES|QL async query API response for a query that does not complete in the designated time. +A query ID is also provided when the request was submitted with the `keep_on_completion` parameter set to `true`. -* **Request (object):** +## client.esql.asyncQueryGet [_esql.async_query_get] +Get async ES|QL query results. +Get the current status and available results or stored results for an ES|QL asynchronous query. +If the Elasticsearch security features are enabled, only the user who first submitted the ES|QL query can retrieve the results using this API. - * **`name` (string)**: Name of the enrich policy to create or update. - * **`geo_match` (Optional, { enrich_fields, indices, match_field, query, name, elasticsearch_version })**: Matches enrich data to incoming documents based on a `geo_shape` query. - * **`match` (Optional, { enrich_fields, indices, match_field, query, name, elasticsearch_version })**: Matches enrich data to incoming documents based on a `term` query. - * **`range` (Optional, { enrich_fields, indices, match_field, query, name, elasticsearch_version })**: Matches a number, date, or IP address in incoming documents to a range in the enrich index based on a `term` query. - * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-esql-async-query-get) + +```ts +client.esql.asyncQueryGet({ id }) +``` +### Arguments [_arguments_esql.async_query_get] +#### Request (object) [_request_esql.async_query_get] +- **`id` (string)**: The unique identifier of the query. +A query ID is provided in the ES|QL async query API response for a query that does not complete in the designated time. +A query ID is also provided when the request was submitted with the `keep_on_completion` parameter set to `true`. +- **`drop_null_columns` (Optional, boolean)**: Indicates whether columns that are entirely `null` will be removed from the `columns` and `values` portion of the results. +If `true`, the response will include an extra section under the name `all_columns` which has the name of all the columns. +- **`keep_alive` (Optional, string | -1 | 0)**: The period for which the query and its results are stored in the cluster. +When this period expires, the query and its results are deleted, even if the query is still ongoing. +- **`wait_for_completion_timeout` (Optional, string | -1 | 0)**: The period to wait for the request to finish. +By default, the request waits for complete query results. +If the request completes during the period specified in this parameter, complete query results are returned. +Otherwise, the response returns an `is_running` value of `true` and no results. -### stats [_stats_3] +## client.esql.asyncQueryStop [_esql.async_query_stop] +Stop async ES|QL query. -Get enrich stats. Returns enrich coordinator statistics and information about enrich policies that are currently executing. +This API interrupts the query execution and returns the results so far. +If the Elasticsearch security features are enabled, only the user who first submitted the ES|QL query can stop it. -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-enrich-stats) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-esql-async-query-stop) ```ts -client.enrich.stats({ ... 
}) +client.esql.asyncQueryStop({ id }) ``` +### Arguments [_arguments_esql.async_query_stop] -### Arguments [_arguments_137] +#### Request (object) [_request_esql.async_query_stop] +- **`id` (string)**: The unique identifier of the query. +A query ID is provided in the ES|QL async query API response for a query that does not complete in the designated time. +A query ID is also provided when the request was submitted with the `keep_on_completion` parameter set to `true`. +- **`drop_null_columns` (Optional, boolean)**: Indicates whether columns that are entirely `null` will be removed from the `columns` and `values` portion of the results. +If `true`, the response will include an extra section under the name `all_columns` which has the name of all the columns. -* **Request (object):** +## client.esql.query [_esql.query] +Run an ES|QL query. +Get search results for an ES|QL (Elasticsearch query language) query. - * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. +[Endpoint documentation](https://www.elastic.co/docs/explore-analyze/query-filter/languages/esql-rest) +```ts +client.esql.query({ query }) +``` +### Arguments [_arguments_esql.query] -## eql [_eql] +#### Request (object) [_request_esql.query] +- **`query` (string)**: The ES|QL query API accepts an ES|QL query string in the query parameter, runs it, and returns the results. +- **`columnar` (Optional, boolean)**: By default, ES|QL returns results as rows. For example, FROM returns each individual document as one row. For the JSON, YAML, CBOR and smile formats, ES|QL can return the results in a columnar fashion where one row represents all the values of a certain column in the results. +- **`filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })**: Specify a Query DSL query in the filter parameter to filter the set of documents that an ES|QL query runs on. +- **`locale` (Optional, string)** +- **`params` (Optional, number | number | string | boolean | null[])**: To avoid any attempts of hacking or code injection, extract the values in a separate list of parameters. Use question mark placeholders (?) in the query string for each of the parameters. +- **`profile` (Optional, boolean)**: If provided and `true` the response will include an extra `profile` object +with information on how the query was executed. This information is for human debugging +and its format can change at any time but it can give some insight into the performance +of each part of the query. +- **`tables` (Optional, Record>)**: Tables to use with the LOOKUP operation. The top level key is the table +name and the next level key is the column name. 
+- **`include_ccs_metadata` (Optional, boolean)**: When set to `true` and performing a cross-cluster query, the response will include an extra `_clusters` +object with information about the clusters that participated in the search along with info such as shards +count. +- **`format` (Optional, Enum("csv" | "json" | "tsv" | "txt" | "yaml" | "cbor" | "smile" | "arrow"))**: A short version of the Accept header, e.g. json, yaml. +- **`delimiter` (Optional, string)**: The character to use between values within a CSV row. Only valid for the CSV format. +- **`drop_null_columns` (Optional, boolean)**: Should columns that are entirely `null` be removed from the `columns` and `values` portion of the results? +Defaults to `false`. If `true` then the response will include an extra section under the name `all_columns` which has the name of all columns. +## client.features.getFeatures [_features.get_features] +Get the features. +Get a list of features that can be included in snapshots using the `feature_states` field when creating a snapshot. +You can use this API to determine which feature states to include when taking a snapshot. +By default, all feature states are included in a snapshot if that snapshot includes the global state, or none if it does not. -### delete [_delete_4] +A feature state includes one or more system indices necessary for a given feature to function. +In order to ensure data integrity, all system indices that comprise a feature state are snapshotted and restored together. -Delete an async EQL search. Delete an async EQL search or a stored synchronous EQL search. The API also deletes results for the search. +The features listed by this API are a combination of built-in features and features defined by plugins. +In order for a feature state to be listed in this API and recognized as a valid feature state by the create snapshot API, the plugin that defines that feature must be installed on the master node. -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-eql-search) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-features-get-features) ```ts -client.eql.delete({ id }) +client.features.getFeatures({ ... }) ``` +### Arguments [_arguments_features.get_features] -### Arguments [_arguments_138] +#### Request (object) [_request_features.get_features] +- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. -* **Request (object):** +## client.features.resetFeatures [_features.reset_features] +Reset the features. +Clear all of the state information stored in system indices by Elasticsearch features, including the security and machine learning indices. - * **`id` (string)**: Identifier for the search to delete. A search ID is provided in the EQL search API’s response for an async search. A search ID is also provided if the request’s `keep_on_completion` parameter is `true`. +WARNING: Intended for development and testing use only. Do not reset features on a production cluster. +Return a cluster to the same state as a new installation by resetting the feature state for all Elasticsearch features. +This deletes all state information stored in system indices. +The response code is HTTP 200 if the state is successfully reset for all features. +It is HTTP 500 if the reset operation failed for any feature. -### get [_get_4] +Note that select features might provide a way to reset particular system indices. 
+Using this API resets all features, both those that are built-in and implemented as plugins.

-Get async EQL search results. Get the current status and available results for an async EQL search or a stored synchronous EQL search.
+To list the features that will be affected, use the get features API.
+
+IMPORTANT: The features installed on the node you submit this request to are the features that will be reset. Run on the master node if you have any doubts about which plugins are installed on individual nodes.

-[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-eql-get)
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-features-reset-features)

```ts
-client.eql.get({ id })
+client.features.resetFeatures({ ... })
```

+### Arguments [_arguments_features.reset_features]

-### Arguments [_arguments_139]

+#### Request (object) [_request_features.reset_features]
+- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node.

-* **Request (object):**
+## client.fleet.globalCheckpoints [_fleet.global_checkpoints]
+Get global checkpoints.

- * **`id` (string)**: Identifier for the search.
- * **`keep_alive` (Optional, string | -1 | 0)**: Period for which the search and its results are stored on the cluster. Defaults to the keep_alive value set by the search’s EQL search API request.
- * **`wait_for_completion_timeout` (Optional, string | -1 | 0)**: Timeout duration to wait for the request to finish. Defaults to no timeout, meaning the request waits for complete search results.
+Get the current global checkpoints for an index.
+This API is designed for internal use by the Fleet server project.

+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/group/endpoint-fleet)
+
+```ts
+client.fleet.globalCheckpoints({ index })
+```

+### Arguments [_arguments_fleet.global_checkpoints]

#### Request (object) [_request_fleet.global_checkpoints]
+- **`index` (string | string)**: A single index or index alias that resolves to a single index.
+- **`wait_for_advance` (Optional, boolean)**: A boolean value which controls whether to wait (until the timeout) for the global checkpoints
+to advance past the provided `checkpoints`.
+- **`wait_for_index` (Optional, boolean)**: A boolean value which controls whether to wait (until the timeout) for the target index to exist
+and all primary shards to be active. Can only be true when `wait_for_advance` is true.
+- **`checkpoints` (Optional, number[])**: A comma separated list of previous global checkpoints. When used in combination with `wait_for_advance`,
+the API will only return once the global checkpoints advance past the checkpoints. Providing an empty list
+will cause Elasticsearch to immediately return the current global checkpoints.
+- **`timeout` (Optional, string | -1 | 0)**: Period to wait for global checkpoints to advance past `checkpoints`.

## client.fleet.msearch [_fleet.msearch]
+Run multiple Fleet searches.
+Run several Fleet searches with a single API request.
+The API follows the same structure as the multi search API.
+However, similar to the Fleet search API, it supports the `wait_for_checkpoints` parameter.
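A rough sketch of a two-search request, assuming a configured `client`; the index alias, checkpoint value, and field names are placeholders, and the alternating header/body layout of `searches` mirrors the multi search API:

```ts
await client.fleet.msearch({
  index: 'my-fleet-index',        // placeholder index alias
  wait_for_checkpoints: [42],     // placeholder checkpoint
  searches: [
    {},                           // header for the first search
    { query: { match_all: {} } }, // body for the first search
    {},                           // header for the second search
    { query: { term: { 'agent.id': 'example-agent' } } } // placeholder field and value
  ]
})
```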
-[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-eql-get-status) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-fleet-msearch) ```ts -client.eql.getStatus({ id }) +client.fleet.msearch({ ... }) ``` +### Arguments [_arguments_fleet.msearch] + +#### Request (object) [_request_fleet.msearch] +- **`index` (Optional, string | string)**: A single target to search. If the target is an index alias, it must resolve to a single index. +- **`searches` (Optional, { allow_no_indices, expand_wildcards, ignore_unavailable, index, preference, request_cache, routing, search_type, ccs_minimize_roundtrips, allow_partial_search_results, ignore_throttled } | { aggregations, collapse, explain, ext, from, highlight, track_total_hits, indices_boost, docvalue_fields, knn, rank, min_score, post_filter, profile, query, rescore, retriever, script_fields, search_after, size, slice, sort, _source, fields, suggest, terminate_after, timeout, track_scores, version, seq_no_primary_term, stored_fields, pit, runtime_mappings, stats }[])** +- **`allow_no_indices` (Optional, boolean)**: If false, the request returns an error if any wildcard expression, index alias, or _all value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting foo*,bar* returns an error if an index starts with foo but no index starts with bar. +- **`ccs_minimize_roundtrips` (Optional, boolean)**: If true, network roundtrips between the coordinating node and remote clusters are minimized for cross-cluster search requests. +- **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Type of index that wildcard expressions can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. +- **`ignore_throttled` (Optional, boolean)**: If true, concrete, expanded or aliased indices are ignored when frozen. +- **`ignore_unavailable` (Optional, boolean)**: If true, missing or closed indices are not included in the response. +- **`max_concurrent_searches` (Optional, number)**: Maximum number of concurrent searches the multi search API can execute. +- **`max_concurrent_shard_requests` (Optional, number)**: Maximum number of concurrent shard requests that each sub-search request executes per node. +- **`pre_filter_shard_size` (Optional, number)**: Defines a threshold that enforces a pre-filter roundtrip to prefilter search shards based on query rewriting if the number of shards the search request expands to exceeds the threshold. This filter roundtrip can limit the number of shards significantly if for instance a shard can not match any documents based on its rewrite method i.e., if date filters are mandatory to match but the shard bounds and the query are disjoint. +- **`search_type` (Optional, Enum("query_then_fetch" | "dfs_query_then_fetch"))**: Indicates whether global term and document frequencies should be used when scoring returned documents. +- **`rest_total_hits_as_int` (Optional, boolean)**: If true, hits.total are returned as an integer in the response. Defaults to false, which returns an object. +- **`typed_keys` (Optional, boolean)**: Specifies whether aggregation and suggester names should be prefixed by their respective types in the response. 
+- **`wait_for_checkpoints` (Optional, number[])**: A comma separated list of checkpoints. When configured, the search API will only be executed on a shard +after the relevant checkpoint has become visible for search. Defaults to an empty list which will cause +Elasticsearch to immediately execute the search. +- **`allow_partial_search_results` (Optional, boolean)**: If true, returns partial results if there are shard request timeouts or shard failures. +If false, returns an error with no partial results. +Defaults to the configured cluster setting `search.default_allow_partial_results`, which is true by default. + +## client.fleet.search [_fleet.search] +Run a Fleet search. +The purpose of the Fleet search API is to provide an API where the search will be run only +after the provided checkpoint has been processed and is visible for searches inside of Elasticsearch. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-fleet-search) -### Arguments [_arguments_140] - -* **Request (object):** +```ts +client.fleet.search({ index }) +``` - * **`id` (string)**: Identifier for the search. +### Arguments [_arguments_fleet.search] + +#### Request (object) [_request_fleet.search] +- **`index` (string | string)**: A single target to search. If the target is an index alias, it must resolve to a single index. +- **`aggregations` (Optional, Record)** +- **`collapse` (Optional, { field, inner_hits, max_concurrent_group_searches, collapse })** +- **`explain` (Optional, boolean)**: If true, returns detailed information about score computation as part of a hit. +- **`ext` (Optional, Record)**: Configuration of search extensions defined by Elasticsearch plugins. +- **`from` (Optional, number)**: Starting document offset. By default, you cannot page through more than 10,000 +hits using the from and size parameters. To page through more hits, use the +search_after parameter. +- **`highlight` (Optional, { encoder, fields })** +- **`track_total_hits` (Optional, boolean | number)**: Number of hits matching the query to count accurately. If true, the exact +number of hits is returned at the cost of some performance. If false, the +response does not include the total number of hits matching the query. +Defaults to 10,000 hits. +- **`indices_boost` (Optional, Record[])**: Boosts the _score of documents from specified indices. +- **`docvalue_fields` (Optional, { field, format, include_unmapped }[])**: Array of wildcard (*) patterns. The request returns doc values for field +names matching these patterns in the hits.fields property of the response. +- **`min_score` (Optional, number)**: Minimum _score for matching documents. Documents with a lower _score are +not included in search results and results collected by aggregations. 
+- **`post_filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })** +- **`profile` (Optional, boolean)** +- **`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })**: Defines the search definition using the Query DSL. +- **`rescore` (Optional, { window_size, query, learning_to_rank } | { window_size, query, learning_to_rank }[])** +- **`script_fields` (Optional, Record)**: Retrieve a script evaluation (based on different fields) for each hit. +- **`search_after` (Optional, number | number | string | boolean | null[])** +- **`size` (Optional, number)**: The number of hits to return. By default, you cannot page through more +than 10,000 hits using the from and size parameters. To page through more +hits, use the search_after parameter. +- **`slice` (Optional, { field, id, max })** +- **`sort` (Optional, string | { _score, _doc, _geo_distance, _script } | string | { _score, _doc, _geo_distance, _script }[])** +- **`_source` (Optional, boolean | { excludes, includes })**: Indicates which source fields are returned for matching documents. These +fields are returned in the hits._source property of the search response. +- **`fields` (Optional, { field, format, include_unmapped }[])**: Array of wildcard (*) patterns. The request returns values for field names +matching these patterns in the hits.fields property of the response. +- **`suggest` (Optional, { text })** +- **`terminate_after` (Optional, number)**: Maximum number of documents to collect for each shard. If a query reaches this +limit, Elasticsearch terminates the query early. Elasticsearch collects documents +before sorting. Defaults to 0, which does not terminate query execution early. +- **`timeout` (Optional, string)**: Specifies the period of time to wait for a response from each shard. If no response +is received before the timeout expires, the request fails and returns an error. +Defaults to no timeout. +- **`track_scores` (Optional, boolean)**: If true, calculate and return document scores, even if the scores are not used for sorting. +- **`version` (Optional, boolean)**: If true, returns document version as part of a hit. 
+- **`seq_no_primary_term` (Optional, boolean)**: If true, returns sequence number and primary term of the last modification
+of each hit. See Optimistic concurrency control.
+- **`stored_fields` (Optional, string | string[])**: List of stored fields to return as part of a hit. If no fields are specified,
+no stored fields are included in the response. If this field is specified, the _source
+parameter defaults to false. You can pass _source: true to return both source fields
+and stored fields in the search response.
+- **`pit` (Optional, { id, keep_alive })**: Limits the search to a point in time (PIT). If you provide a PIT, you
+cannot specify an `<index>` in the request path.
+- **`runtime_mappings` (Optional, Record)**: Defines one or more runtime fields in the search request. These fields take
+precedence over mapped fields with the same name.
+- **`stats` (Optional, string[])**: Stats groups to associate with the search. Each group maintains a statistics
+aggregation for its associated searches. You can retrieve these stats using
+the indices stats API.
+- **`allow_no_indices` (Optional, boolean)**
+- **`analyzer` (Optional, string)**
+- **`analyze_wildcard` (Optional, boolean)**
+- **`batched_reduce_size` (Optional, number)**
+- **`ccs_minimize_roundtrips` (Optional, boolean)**
+- **`default_operator` (Optional, Enum("and" | "or"))**
+- **`df` (Optional, string)**
+- **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**
+- **`ignore_throttled` (Optional, boolean)**
+- **`ignore_unavailable` (Optional, boolean)**
+- **`lenient` (Optional, boolean)**
+- **`max_concurrent_shard_requests` (Optional, number)**
+- **`preference` (Optional, string)**
+- **`pre_filter_shard_size` (Optional, number)**
+- **`request_cache` (Optional, boolean)**
+- **`routing` (Optional, string)**
+- **`scroll` (Optional, string | -1 | 0)**
+- **`search_type` (Optional, Enum("query_then_fetch" | "dfs_query_then_fetch"))**
+- **`suggest_field` (Optional, string)**: Specifies which field to use for suggestions.
+- **`suggest_mode` (Optional, Enum("missing" | "popular" | "always"))**
+- **`suggest_size` (Optional, number)**
+- **`suggest_text` (Optional, string)**: The source text for which the suggestions should be returned.
+- **`typed_keys` (Optional, boolean)**
+- **`rest_total_hits_as_int` (Optional, boolean)**
+- **`_source_excludes` (Optional, string | string[])**
+- **`_source_includes` (Optional, string | string[])**
+- **`q` (Optional, string)**
+- **`wait_for_checkpoints` (Optional, number[])**: A comma separated list of checkpoints. When configured, the search API will only be executed on a shard
+after the relevant checkpoint has become visible for search. Defaults to an empty list which will cause
+Elasticsearch to immediately execute the search.
+- **`allow_partial_search_results` (Optional, boolean)**: If true, returns partial results if there are shard request timeouts or shard failures.
+If false, returns an error with no partial results.
+Defaults to the configured cluster setting `search.default_allow_partial_results`, which is true by default.
+
+## client.graph.explore [_graph.explore]
+Explore graph analytics.
+Extract and summarize information about the documents and terms in an Elasticsearch data stream or index.
+The easiest way to understand the behavior of this API is to use the Graph UI to explore connections.
+An initial request to the `_explore` API contains a seed query that identifies the documents of interest and specifies the fields that define the vertices and connections you want to include in the graph.
+Subsequent requests enable you to spider out from one or more vertices of interest.
+You can exclude vertices that have already been returned.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/group/endpoint-graph)
+```ts
+client.graph.explore({ index })
+```
+### Arguments [_arguments_graph.explore]
-### search [_search_2]
+#### Request (object) [_request_graph.explore]
+- **`index` (string | string[])**: Name of the index.
+- **`connections` (Optional, { connections, query, vertices })**: Specifies one or more fields from which you want to extract terms that are associated with the specified vertices.
+- **`controls` (Optional, { sample_diversity, sample_size, timeout, use_significance })**: Direct the Graph API how to build the graph.
+- **`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })**: A seed query that identifies the documents of interest. Can be any valid Elasticsearch query.
+- **`vertices` (Optional, { exclude, field, include, min_doc_count, shard_min_doc_count, size }[])**: Specifies one or more fields that contain the terms you want to include in the graph as vertices.
+- **`routing` (Optional, string)**: Custom value used to route operations to a specific shard.
+- **`timeout` (Optional, string | -1 | 0)**: Specifies the period of time to wait for a response from each shard.
+If no response is received before the timeout expires, the request fails and returns an error.
+Defaults to no timeout.
-Get EQL search results. Returns search results for an Event Query Language (EQL) query. EQL assumes each document in a data stream or index corresponds to an event.
+## client.ilm.deleteLifecycle [_ilm.delete_lifecycle]
+Delete a lifecycle policy.
+You cannot delete policies that are currently in use. If the policy is being used to manage any indices, the request fails and returns an error.
-[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-eql-get-status)
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-ilm-delete-lifecycle)
```ts
-client.eql.search({ index, query })
+client.ilm.deleteLifecycle({ policy })
```
+### Arguments [_arguments_ilm.delete_lifecycle]
-### Arguments [_arguments_141]
-
-* **Request (object):**
-
- * **`index` (string | string[])**: The name of the index to scope the operation
- * **`query` (string)**: EQL query you wish to run.
- * **`case_sensitive` (Optional, boolean)**
- * **`event_category_field` (Optional, string)**: Field containing the event classification, such as process, file, or network.
- * **`tiebreaker_field` (Optional, string)**: Field used to sort hits with the same timestamp in ascending order - * **`timestamp_field` (Optional, string)**: Field containing event timestamp. Default "@timestamp" - * **`fetch_size` (Optional, number)**: Maximum number of events to search at a time for sequence queries. - * **`filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type } | { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type }[])**: Query, written in Query DSL, used to filter the events on which the EQL query runs. - * **`keep_alive` (Optional, string | -1 | 0)** - * **`keep_on_completion` (Optional, boolean)** - * **`wait_for_completion_timeout` (Optional, string | -1 | 0)** - * **`allow_partial_search_results` (Optional, boolean)** - * **`allow_partial_sequence_results` (Optional, boolean)** - * **`size` (Optional, number)**: For basic queries, the maximum number of matching events to return. Defaults to 10 - * **`fields` (Optional, { field, format, include_unmapped } | { field, format, include_unmapped }[])**: Array of wildcard (*) patterns. The response returns values for field names matching these patterns in the fields property of each hit. - * **`result_position` (Optional, Enum("tail" | "head"))** - * **`runtime_mappings` (Optional, Record)** - * **`max_samples_per_key` (Optional, number)**: By default, the response of a sample query contains up to `10` samples, with one sample per unique set of join keys. Use the `size` parameter to get a smaller or larger set of samples. To retrieve more than one sample per set of join keys, use the `max_samples_per_key` parameter. Pipes are not supported for sample queries. - * **`allow_no_indices` (Optional, boolean)** - * **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])** - * **`ignore_unavailable` (Optional, boolean)**: If true, missing or closed indices are not included in the response. +#### Request (object) [_request_ilm.delete_lifecycle] +- **`policy` (string)**: Identifier for the policy. +- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. 
If no response is received before the timeout expires, the request fails and returns an error.
+- **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.
+## client.ilm.explainLifecycle [_ilm.explain_lifecycle]
+Explain the lifecycle state.
+Get the current lifecycle status for one or more indices.
+For data streams, the API retrieves the current lifecycle status for the stream's backing indices.
+The response indicates when the index entered each lifecycle state, provides the definition of the running phase, and information about any failures.
-## esql [_esql]
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-ilm-explain-lifecycle)
+```ts
+client.ilm.explainLifecycle({ index })
+```
-### async_query [_async_query]
+### Arguments [_arguments_ilm.explain_lifecycle]
-Run an async ES|QL query. Asynchronously run an ES|QL (Elasticsearch query language) query, monitor its progress, and retrieve results when they become available.
+#### Request (object) [_request_ilm.explain_lifecycle]
+- **`index` (string)**: List of data streams, indices, and aliases to target. Supports wildcards (`*`).
+To target all data streams and indices, use `*` or `_all`.
+- **`only_errors` (Optional, boolean)**: Filters the returned indices to only indices that are managed by ILM and are in an error state, either due to encountering an error while executing the policy, or attempting to use a policy that does not exist.
+- **`only_managed` (Optional, boolean)**: Filters the returned indices to only indices that are managed by ILM.
+- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.
-The API accepts the same parameters and request body as the synchronous query API, along with additional async related properties.
+## client.ilm.getLifecycle [_ilm.get_lifecycle]
+Get lifecycle policies.
-[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-esql-async-query)
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-ilm-get-lifecycle)
```ts
-client.esql.asyncQuery({ query })
+client.ilm.getLifecycle({ ... })
```
+### Arguments [_arguments_ilm.get_lifecycle]
+
+#### Request (object) [_request_ilm.get_lifecycle]
+- **`policy` (Optional, string)**: Identifier for the policy.
+- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.
+- **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.
-### Arguments [_arguments_142]
+## client.ilm.getStatus [_ilm.get_status]
+Get the ILM status.
-* **Request (object):**
+Get the current index lifecycle management status.
- * **`query` (string)**: The ES|QL query API accepts an ES|QL query string in the query parameter, runs it, and returns the results.
- * **`columnar` (Optional, boolean)**: By default, ES|QL returns results as rows. For example, FROM returns each individual document as one row.
For the JSON, YAML, CBOR and smile formats, ES|QL can return the results in a columnar fashion where one row represents all the values of a certain column in the results. - * **`filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })**: Specify a Query DSL query in the filter parameter to filter the set of documents that an ES|QL query runs on. - * **`locale` (Optional, string)** - * **`params` (Optional, number | number | string | boolean | null | User-defined value[])**: To avoid any attempts of hacking or code injection, extract the values in a separate list of parameters. Use question mark placeholders (?) in the query string for each of the parameters. - * **`profile` (Optional, boolean)**: If provided and `true` the response will include an extra `profile` object with information on how the query was executed. This information is for human debugging and its format can change at any time but it can give some insight into the performance of each part of the query. - * **`tables` (Optional, Record>)**: Tables to use with the LOOKUP operation. The top level key is the table name and the next level key is the column name. - * **`delimiter` (Optional, string)**: The character to use between values within a CSV row. It is valid only for the CSV format. - * **`drop_null_columns` (Optional, boolean)**: Indicates whether columns that are entirely `null` will be removed from the `columns` and `values` portion of the results. If `true`, the response will include an extra section under the name `all_columns` which has the name of all the columns. - * **`format` (Optional, Enum("csv" | "json" | "tsv" | "txt" | "yaml" | "cbor" | "smile" | "arrow"))**: A short version of the Accept header, for example `json` or `yaml`. - * **`keep_alive` (Optional, string | -1 | 0)**: The period for which the query and its results are stored in the cluster. The default period is five days. When this period expires, the query and its results are deleted, even if the query is still ongoing. If the `keep_on_completion` parameter is false, Elasticsearch only stores async queries that do not complete within the period set by the `wait_for_completion_timeout` parameter, regardless of this value. - * **`keep_on_completion` (Optional, boolean)**: Indicates whether the query and its results are stored in the cluster. If false, the query and its results are stored in the cluster only if the request does not complete during the period set by the `wait_for_completion_timeout` parameter. - * **`wait_for_completion_timeout` (Optional, string | -1 | 0)**: The period to wait for the request to finish. By default, the request waits for 1 second for the query results. If the query completes during this period, results are returned Otherwise, a query ID is returned that can later be used to retrieve the results. 
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-ilm-get-status) +```ts +client.ilm.getStatus() +``` -### async_query_delete [_async_query_delete] +## client.ilm.migrateToDataTiers [_ilm.migrate_to_data_tiers] +Migrate to data tiers routing. +Switch the indices, ILM policies, and legacy, composable, and component templates from using custom node attributes and attribute-based allocation filters to using data tiers. +Optionally, delete one legacy index template. +Using node roles enables ILM to automatically move the indices between data tiers. -Delete an async ES|QL query. If the query is still running, it is cancelled. Otherwise, the stored results are deleted. +Migrating away from custom node attributes routing can be manually performed. +This API provides an automated way of performing three out of the four manual steps listed in the migration guide: -If the Elasticsearch security features are enabled, only the following users can use this API to delete a query: +1. Stop setting the custom hot attribute on new indices. +1. Remove custom allocation settings from existing ILM policies. +1. Replace custom allocation settings from existing indices with the corresponding tier preference. -* The authenticated user that submitted the original query request -* Users with the `cancel_task` cluster privilege +ILM must be stopped before performing the migration. +Use the stop ILM and get ILM status APIs to wait until the reported operation mode is `STOPPED`. -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-esql-async-query-delete) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-ilm-migrate-to-data-tiers) ```ts -client.esql.asyncQueryDelete({ id }) +client.ilm.migrateToDataTiers({ ... }) ``` +### Arguments [_arguments_ilm.migrate_to_data_tiers] -### Arguments [_arguments_143] - -* **Request (object):** +#### Request (object) [_request_ilm.migrate_to_data_tiers] +- **`legacy_template_to_delete` (Optional, string)** +- **`node_attribute` (Optional, string)** +- **`dry_run` (Optional, boolean)**: If true, simulates the migration from node attributes based allocation filters to data tiers, but does not perform the migration. +This provides a way to retrieve the indices and ILM policies that need to be migrated. +- **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. +It can also be set to `-1` to indicate that the request should never timeout. - * **`id` (string)**: The unique identifier of the query. A query ID is provided in the ES|QL async query API response for a query that does not complete in the designated time. A query ID is also provided when the request was submitted with the `keep_on_completion` parameter set to `true`. +## client.ilm.moveToStep [_ilm.move_to_step] +Move to a lifecycle step. +Manually move an index into a specific step in the lifecycle policy and run that step. +WARNING: This operation can result in the loss of data. Manually moving an index into a specific step runs that step even if it has already been performed. This is a potentially destructive action and this should be considered an expert level API. +You must specify both the current step and the step to be executed in the body of the request. 
+The request will fail if the current step does not match the step currently running for the index.
+This is to prevent the index from being moved from an unexpected step into the next step.
-### async_query_get [_async_query_get]
+When specifying the target (`next_step`) to which the index will be moved, either the name or both the action and name fields are optional.
+If only the phase is specified, the index will move to the first step of the first action in the target phase.
+If the phase and action are specified, the index will move to the first step of the specified action in the specified phase.
+Only actions specified in the ILM policy are considered valid.
+An index cannot move to a step that is not part of its policy.
-Get async ES|QL query results. Get the current status and available results or stored results for an ES|QL asynchronous query. If the Elasticsearch security features are enabled, only the user who first submitted the ES|QL query can retrieve the results using this API.
-
-[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-esql-async-query-get)
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-ilm-move-to-step)
```ts
-client.esql.asyncQueryGet({ id })
+client.ilm.moveToStep({ index, current_step, next_step })
```
+### Arguments [_arguments_ilm.move_to_step]
+
+#### Request (object) [_request_ilm.move_to_step]
+- **`index` (string)**: The name of the index whose lifecycle step is to change.
+- **`current_step` ({ action, name, phase })**: The step that the index is expected to be in.
+- **`next_step` ({ action, name, phase })**: The step that you want to run.
-### Arguments [_arguments_144]
+## client.ilm.putLifecycle [_ilm.put_lifecycle]
+Create or update a lifecycle policy.
+If the specified policy exists, it is replaced and the policy version is incremented.
-* **Request (object):**
+NOTE: Only the latest version of the policy is stored; you cannot revert to previous versions.
- * **`id` (string)**: The unique identifier of the query. A query ID is provided in the ES|QL async query API response for a query that does not complete in the designated time. A query ID is also provided when the request was submitted with the `keep_on_completion` parameter set to `true`.
- * **`drop_null_columns` (Optional, boolean)**: Indicates whether columns that are entirely `null` will be removed from the `columns` and `values` portion of the results. If `true`, the response will include an extra section under the name `all_columns` which has the name of all the columns.
- * **`keep_alive` (Optional, string | -1 | 0)**: The period for which the query and its results are stored in the cluster. When this period expires, the query and its results are deleted, even if the query is still ongoing.
- * **`wait_for_completion_timeout` (Optional, string | -1 | 0)**: The period to wait for the request to finish. By default, the request waits for complete query results. If the request completes during the period specified in this parameter, complete query results are returned. Otherwise, the response returns an `is_running` value of `true` and no results.
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-ilm-put-lifecycle)
+```ts
+client.ilm.putLifecycle({ policy })
+```
+### Arguments [_arguments_ilm.put_lifecycle]
-### query [_query]
+#### Request (object) [_request_ilm.put_lifecycle]
+- **`policy` (string)**: Identifier for the policy.
+- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.
+- **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.
-Run an ES|QL query. Get search results for an ES|QL (Elasticsearch query language) query.
+## client.ilm.removePolicy [_ilm.remove_policy]
+Remove policies from an index.
+Remove the assigned lifecycle policies from an index or a data stream's backing indices.
+It also stops managing the indices.
-[Endpoint documentation](docs-content://explore-analyze/query-filter/languages/esql-rest.md)
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-ilm-remove-policy)
```ts
-client.esql.query({ query })
+client.ilm.removePolicy({ index })
```
+### Arguments [_arguments_ilm.remove_policy]
+
+#### Request (object) [_request_ilm.remove_policy]
+- **`index` (string)**: The name of the index to remove the policy from
-### Arguments [_arguments_145]
+## client.ilm.retry [_ilm.retry]
+Retry a policy.
+Retry running the lifecycle policy for an index that is in the ERROR step.
+The API sets the policy back to the step where the error occurred and runs the step.
+Use the explain lifecycle state API to determine whether an index is in the ERROR step.
-* **Request (object):**
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-ilm-retry)
+
+```ts
+client.ilm.retry({ index })
+```
- * **`query` (string)**: The ES|QL query API accepts an ES|QL query string in the query parameter, runs it, and returns the results.
- * **`columnar` (Optional, boolean)**: By default, ES|QL returns results as rows. For example, FROM returns each individual document as one row. For the JSON, YAML, CBOR and smile formats, ES|QL can return the results in a columnar fashion where one row represents all the values of a certain column in the results.
- * **`filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })**: Specify a Query DSL query in the filter parameter to filter the set of documents that an ES|QL query runs on.
- * **`locale` (Optional, string)**
- * **`params` (Optional, number | number | string | boolean | null | User-defined value[])**: To avoid any attempts of hacking or code injection, extract the values in a separate list of parameters. Use question mark placeholders (?) in the query string for each of the parameters.
- * **`profile` (Optional, boolean)**: If provided and `true` the response will include an extra `profile` object with information on how the query was executed.
This information is for human debugging and its format can change at any time but it can give some insight into the performance of each part of the query.
- * **`tables` (Optional, Record>)**: Tables to use with the LOOKUP operation. The top level key is the table name and the next level key is the column name.
- * **`format` (Optional, Enum("csv" | "json" | "tsv" | "txt" | "yaml" | "cbor" | "smile" | "arrow"))**: A short version of the Accept header, e.g. json, yaml.
- * **`delimiter` (Optional, string)**: The character to use between values within a CSV row. Only valid for the CSV format.
- * **`drop_null_columns` (Optional, boolean)**: Should columns that are entirely `null` be removed from the `columns` and `values` portion of the results? Defaults to `false`. If `true` then the response will include an extra section under the name `all_columns` which has the name of all columns.
+### Arguments [_arguments_ilm.retry]
+#### Request (object) [_request_ilm.retry]
+- **`index` (string)**: The names of the indices (comma-separated) whose failed lifecycle step is to be retried
+## client.ilm.start [_ilm.start]
+Start the ILM plugin.
+Start the index lifecycle management plugin if it is currently stopped.
+ILM is started automatically when the cluster is formed.
+Restarting ILM is necessary only when it has been stopped using the stop ILM API.
-## features [_features_17]
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-ilm-start)
+```ts
+client.ilm.start({ ... })
+```
-### get_features [_get_features]
+### Arguments [_arguments_ilm.start]
-Get the features. Get a list of features that can be included in snapshots using the `feature_states` field when creating a snapshot. You can use this API to determine which feature states to include when taking a snapshot. By default, all feature states are included in a snapshot if that snapshot includes the global state, or none if it does not.
+#### Request (object) [_request_ilm.start]
+- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.
+- **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.
-A feature state includes one or more system indices necessary for a given feature to function. In order to ensure data integrity, all system indices that comprise a feature state are snapshotted and restored together.
+## client.ilm.stop [_ilm.stop]
+Stop the ILM plugin.
+Halt all lifecycle management operations and stop the index lifecycle management plugin.
+This is useful when you are performing maintenance on the cluster and need to prevent ILM from performing any actions on your indices.
-The features listed by this API are a combination of built-in features and features defined by plugins. In order for a feature state to be listed in this API and recognized as a valid feature state by the create snapshot API, the plugin that defines that feature must be installed on the master node.
+The API returns as soon as the stop request has been acknowledged, but the plugin might continue to run until in-progress operations complete and the plugin can be safely stopped.
+Use the get ILM status API to check whether ILM is running.
-[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-features-get-features) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-ilm-stop) ```ts -client.features.getFeatures({ ... }) +client.ilm.stop({ ... }) ``` +### Arguments [_arguments_ilm.stop] -### Arguments [_arguments_146] +#### Request (object) [_request_ilm.stop] +- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. +- **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. -* **Request (object):** +## client.indices.addBlock [_indices.add_block] +Add an index block. - * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. +Add an index block to an index. +Index blocks limit the operations allowed on an index by blocking specific operation types. +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-indices-add-block) +```ts +client.indices.addBlock({ index, block }) +``` -### reset_features [_reset_features] +### Arguments [_arguments_indices.add_block] + +#### Request (object) [_request_indices.add_block] +- **`index` (string)**: A list or wildcard expression of index names used to limit the request. +By default, you must explicitly name the indices you are adding blocks to. +To allow the adding of blocks to indices with `_all`, `*`, or other wildcard expressions, change the `action.destructive_requires_name` setting to `false`. +You can update this setting in the `elasticsearch.yml` file or by using the cluster update settings API. +- **`block` (Enum("metadata" | "read" | "read_only" | "write"))**: The block type to add to the index. +- **`allow_no_indices` (Optional, boolean)**: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. +This behavior applies even if the request targets other open indices. +For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. +- **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: The type of index that wildcard patterns can match. +If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. +It supports a list of values, such as `open,hidden`. +- **`ignore_unavailable` (Optional, boolean)**: If `false`, the request returns an error if it targets a missing or closed index. +- **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for the master node. +If the master node is not available before the timeout expires, the request fails and returns an error. +It can also be set to `-1` to indicate that the request should never timeout. +- **`timeout` (Optional, string | -1 | 0)**: The period to wait for a response from all relevant nodes in the cluster after updating the cluster metadata. +If no response is received before the timeout expires, the cluster metadata update still applies but the response will indicate that it was not completely acknowledged. +It can also be set to `-1` to indicate that the request should never timeout. 
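+
+As a usage illustration (a minimal sketch assuming an already-instantiated `client` and a hypothetical index named `my-index`), adding a write block looks like this:
+
+```ts
+// Block write operations on the index; reads and metadata updates remain allowed.
+const resp = await client.indices.addBlock({
+  index: 'my-index',
+  block: 'write',
+})
+console.log(resp.acknowledged) // true once the block has been added
+```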
+
+## client.indices.analyze [_indices.analyze]
+Get tokens from text analysis.
+The analyze API performs analysis on a text string and returns the resulting tokens.
+
+Generating an excessive amount of tokens may cause a node to run out of memory.
+The `index.analyze.max_token_count` setting enables you to limit the number of tokens that can be produced.
+If more than this limit of tokens gets generated, an error occurs.
+The `_analyze` endpoint without a specified index will always use `10000` as its limit.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-indices-analyze)
-Reset the features. Clear all of the state information stored in system indices by Elasticsearch features, including the security and machine learning indices.
+```ts
+client.indices.analyze({ ... })
+```
-::::{warning}
-Intended for development and testing use only. Do not reset features on a production cluster.
-::::
+### Arguments [_arguments_indices.analyze]
+
+#### Request (object) [_request_indices.analyze]
+- **`index` (Optional, string)**: Index used to derive the analyzer.
+If specified, the `analyzer` or field parameter overrides this value.
+If no index is specified or the index does not have a default analyzer, the analyze API uses the standard analyzer.
+- **`analyzer` (Optional, string)**: The name of the analyzer that should be applied to the provided `text`.
+This could be a built-in analyzer, or an analyzer that’s been configured in the index.
+- **`attributes` (Optional, string[])**: Array of token attributes used to filter the output of the `explain` parameter.
+- **`char_filter` (Optional, string | { type, escaped_tags } | { type, mappings, mappings_path } | { type, flags, pattern, replacement } | { type, mode, name } | { type, normalize_kana, normalize_kanji }[])**: Array of character filters used to preprocess characters before the tokenizer.
+- **`explain` (Optional, boolean)**: If `true`, the response includes token attributes and additional details.
+- **`field` (Optional, string)**: Field used to derive the analyzer.
+To use this parameter, you must specify an index.
+If specified, the `analyzer` parameter overrides this value.
+- **`filter` (Optional, string | { type, preserve_original } | { type, common_words, common_words_path, ignore_case, query_mode } | { type, filter, script } | { type, delimiter, encoding } | { type, max_gram, min_gram, side, preserve_original } | { type, articles, articles_path, articles_case } | { type, max_output_size, separator } | { type, dedup, dictionary, locale, longest_only } | { type } | { type, mode, types } | { type, keep_words, keep_words_case, keep_words_path } | { type, ignore_case, keywords, keywords_path, keywords_pattern } | { type } | { type, max, min } | { type, consume_all_tokens, max_token_count } | { type, language } | { type, filters, preserve_original } | { type, max_gram, min_gram, preserve_original } | { type, stoptags } | { type, patterns, preserve_original } | { type, all, flags, pattern, replacement } | { type } | { type, script } | { type } | { type } | { type, filler_token, max_shingle_size, min_shingle_size, output_unigrams, output_unigrams_if_no_shingles, token_separator } | { type, language } | { type, rules, rules_path } | { type, language } | { type, ignore_case, remove_trailing, stopwords, stopwords_path } | { type, expand, format, lenient, synonyms, synonyms_path, synonyms_set, tokenizer, updateable } | { type, expand, format, lenient, synonyms, synonyms_path, synonyms_set, tokenizer, updateable } | { type } | { type, length } | { type, only_on_same_position } | { type } | { type, adjust_offsets, catenate_all, catenate_numbers, catenate_words, generate_number_parts, generate_word_parts, ignore_keywords, preserve_original, protected_words, protected_words_path, split_on_case_change, split_on_numerics, stem_english_possessive, type_table, type_table_path } | { type, catenate_all, catenate_numbers, catenate_words, generate_number_parts, generate_word_parts, preserve_original, protected_words, protected_words_path, split_on_case_change, split_on_numerics, stem_english_possessive, type_table, type_table_path } | { type, minimum_length } | { type, use_romaji } | { type, stoptags } | { type, alternate, case_first, case_level, country, decomposition, hiragana_quaternary_mode, language, numeric, rules, strength, variable_top, variant } | { type, unicode_set_filter } | { type, name } | { type, dir, id } | { type, encoder, languageset, max_code_len, name_type, replace, rule_type } | { type }[])**: Array of token filters used to apply after the tokenizer. +- **`normalizer` (Optional, string)**: Normalizer to use to convert text into a single token. +- **`text` (Optional, string | string[])**: Text to analyze. +If an array of strings is provided, it is analyzed as a multi-value field. +- **`tokenizer` (Optional, string | { type, tokenize_on_chars, max_token_length } | { type, max_token_length } | { type, custom_token_chars, max_gram, min_gram, token_chars } | { type, buffer_size } | { type } | { type } | { type, custom_token_chars, max_gram, min_gram, token_chars } | { type, buffer_size, delimiter, replacement, reverse, skip } | { type, flags, group, pattern } | { type, pattern } | { type, pattern } | { type, max_token_length } | { type } | { type, max_token_length } | { type, max_token_length } | { type, rule_files } | { type, discard_punctuation, mode, nbest_cost, nbest_examples, user_dictionary, user_dictionary_rules, discard_compound_token } | { type, decompound_mode, discard_punctuation, user_dictionary, user_dictionary_rules })**: Tokenizer to use to convert text into tokens. 
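+
+As a usage illustration (a minimal sketch assuming an already-instantiated `client`; the analyzer name and sample text are illustrative), analyzing a string with the built-in `standard` analyzer looks like this:
+
+```ts
+// Analyze a sample string and print the tokens the standard analyzer produces.
+const result = await client.indices.analyze({
+  analyzer: 'standard',
+  text: 'Elasticsearch is a distributed search engine',
+})
+console.log(result.tokens?.map((t) => t.token))
+```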
+ +## client.indices.cancelMigrateReindex [_indices.cancel_migrate_reindex] +Cancel a migration reindex operation. +Cancel a migration reindex attempt for a data stream or index. -Return a cluster to the same state as a new installation by resetting the feature state for all Elasticsearch features. This deletes all state information stored in system indices. +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-indices-cancel-migrate-reindex) -The response code is HTTP 200 if the state is successfully reset for all features. It is HTTP 500 if the reset operation failed for any feature. +```ts +client.indices.cancelMigrateReindex({ index }) +``` -Note that select features might provide a way to reset particular system indices. Using this API resets all features, both those that are built-in and implemented as plugins. +### Arguments [_arguments_indices.cancel_migrate_reindex] -To list the features that will be affected, use the get features API. +#### Request (object) [_request_indices.cancel_migrate_reindex] +- **`index` (string | string[])**: The index or data stream name -::::{important} -The features installed on the node you submit this request to are the features that will be reset. Run on the master node if you have any doubts about which plugins are installed on individual nodes. -:::: +## client.indices.clearCache [_indices.clear_cache] +Clear the cache. +Clear the cache of one or more indices. +For data streams, the API clears the caches of the stream's backing indices. +By default, the clear cache API clears all caches. +To clear only specific caches, use the `fielddata`, `query`, or `request` parameters. +To clear the cache only of specific fields, use the `fields` parameter. -[Endpoint documentation](docs-content://deploy-manage/tools/snapshot-and-restore.md) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-indices-clear-cache) ```ts -client.features.resetFeatures({ ... }) +client.indices.clearCache({ ... }) ``` +### Arguments [_arguments_indices.clear_cache] + +#### Request (object) [_request_indices.clear_cache] +- **`index` (Optional, string | string[])**: List of data streams, indices, and aliases used to limit the request. +Supports wildcards (`*`). +To target all data streams and indices, omit this parameter or use `*` or `_all`. +- **`allow_no_indices` (Optional, boolean)**: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. +This behavior applies even if the request targets other open indices. +- **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Type of index that wildcard patterns can match. +If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. +Supports a list of values, such as `open,hidden`. +Valid values are: `all`, `open`, `closed`, `hidden`, `none`. +- **`fielddata` (Optional, boolean)**: If `true`, clears the fields cache. +Use the `fields` parameter to clear the cache of specific fields only. +- **`fields` (Optional, string | string[])**: List of field names used to limit the `fielddata` parameter. +- **`ignore_unavailable` (Optional, boolean)**: If `false`, the request returns an error if it targets a missing or closed index. +- **`query` (Optional, boolean)**: If `true`, clears the query cache. 
+- **`request` (Optional, boolean)**: If `true`, clears the request cache. + +## client.indices.clone [_indices.clone] +Clone an index. +Clone an existing index into a new index. +Each original primary shard is cloned into a new primary shard in the new index. + +IMPORTANT: Elasticsearch does not apply index templates to the resulting index. +The API also does not copy index metadata from the original index. +Index metadata includes aliases, index lifecycle management phase definitions, and cross-cluster replication (CCR) follower information. +For example, if you clone a CCR follower index, the resulting clone will not be a follower index. + +The clone API copies most index settings from the source index to the resulting index, with the exception of `index.number_of_replicas` and `index.auto_expand_replicas`. +To set the number of replicas in the resulting index, configure these settings in the clone request. + +Cloning works as follows: + +* First, it creates a new target index with the same definition as the source index. +* Then it hard-links segments from the source index into the target index. If the file system does not support hard-linking, all segments are copied into the new index, which is a much more time consuming process. +* Finally, it recovers the target index as though it were a closed index which had just been re-opened. + +IMPORTANT: Indices can only be cloned if they meet the following requirements: -### Arguments [_arguments_147] +* The index must be marked as read-only and have a cluster health status of green. +* The target index must not exist. +* The source index must have the same number of primary shards as the target index. +* The node handling the clone process must have sufficient free disk space to accommodate a second copy of the existing index. -* **Request (object):** +The current write index on a data stream cannot be cloned. +In order to clone the current write index, the data stream must first be rolled over so that a new write index is created and then the previous write index can be cloned. - * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. +NOTE: Mappings cannot be specified in the `_clone` request. The mappings of the source index will be used for the target index. +**Monitor the cloning process** +The cloning process can be monitored with the cat recovery API or the cluster health API can be used to wait until all primary shards have been allocated by setting the `wait_for_status` parameter to `yellow`. -## fleet [_fleet] +The `_clone` API returns as soon as the target index has been added to the cluster state, before any shards have been allocated. +At this point, all shards are in the state unassigned. +If, for any reason, the target index can't be allocated, its primary shard will remain unassigned until it can be allocated on that node. +Once the primary shard is allocated, it moves to state initializing, and the clone process begins. +When the clone operation completes, the shard will become active. +At that point, Elasticsearch will try to allocate any replicas and may decide to relocate the primary shard to another node. -### global_checkpoints [_global_checkpoints] +**Wait for active shards** -Get global checkpoints. Get the current global checkpoints for an index. This API is designed for internal use by the Fleet server project. 
+Because the clone operation creates a new index to clone the shards to, the wait for active shards setting on index creation applies to the clone index action as well. -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-fleet) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-indices-clone) ```ts -client.fleet.globalCheckpoints({ index }) +client.indices.clone({ index, target }) ``` +### Arguments [_arguments_indices.clone] -### Arguments [_arguments_148] +#### Request (object) [_request_indices.clone] +- **`index` (string)**: Name of the source index to clone. +- **`target` (string)**: Name of the target index to create. +- **`aliases` (Optional, Record)**: Aliases for the resulting index. +- **`settings` (Optional, Record)**: Configuration options for the target index. +- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. +- **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. +If no response is received before the timeout expires, the request fails and returns an error. +- **`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))**: The number of shard copies that must be active before proceeding with the operation. +Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). -* **Request (object):** +## client.indices.close [_indices.close] +Close an index. +A closed index is blocked for read or write operations and does not allow all operations that opened indices allow. +It is not possible to index documents or to search for documents in a closed index. +Closed indices do not have to maintain internal data structures for indexing or searching documents, which results in a smaller overhead on the cluster. - * **`index` (string | string)**: A single index or index alias that resolves to a single index. - * **`wait_for_advance` (Optional, boolean)**: A boolean value which controls whether to wait (until the timeout) for the global checkpoints to advance past the provided `checkpoints`. - * **`wait_for_index` (Optional, boolean)**: A boolean value which controls whether to wait (until the timeout) for the target index to exist and all primary shards be active. Can only be true when `wait_for_advance` is true. - * **`checkpoints` (Optional, number[])**: A comma separated list of previous global checkpoints. When used in combination with `wait_for_advance`, the API will only return once the global checkpoints advances past the checkpoints. Providing an empty list will cause Elasticsearch to immediately return the current global checkpoints. - * **`timeout` (Optional, string | -1 | 0)**: Period to wait for a global checkpoints to advance past `checkpoints`. +When opening or closing an index, the master node is responsible for restarting the index shards to reflect the new state of the index. +The shards will then go through the normal recovery process. +The data of opened and closed indices is automatically replicated by the cluster to ensure that enough shard copies are safely kept around at all times. +You can open and close multiple indices. +An error is thrown if the request explicitly refers to a missing index. +This behaviour can be turned off using the `ignore_unavailable=true` parameter. 
+By default, you must explicitly name the indices you are opening or closing.
+To open or close indices with `_all`, `*`, or other wildcard expressions, change the `action.destructive_requires_name` setting to `false`. This setting can also be changed with the cluster update settings API.
-### msearch [_msearch_2]
+Closed indices consume a significant amount of disk space which can cause problems in managed environments.
+Closing indices can be turned off with the cluster settings API by setting `cluster.indices.close.enable` to `false`.
-Run multiple Fleet searches. Run several Fleet searches with a single API request. The API follows the same structure as the multi search API. However, similar to the Fleet search API, it supports the `wait_for_checkpoints` parameter.
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-indices-close)
```ts
-client.fleet.msearch({ ... })
+client.indices.close({ index })
```
+### Arguments [_arguments_indices.close]
+
+#### Request (object) [_request_indices.close]
+- **`index` (string | string[])**: List or wildcard expression of index names used to limit the request.
+- **`allow_no_indices` (Optional, boolean)**: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices.
+This behavior applies even if the request targets other open indices.
+- **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Type of index that wildcard patterns can match.
+If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams.
+Supports a list of values, such as `open,hidden`.
+Valid values are: `all`, `open`, `closed`, `hidden`, `none`.
+- **`ignore_unavailable` (Optional, boolean)**: If `false`, the request returns an error if it targets a missing or closed index.
+- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node.
+If no response is received before the timeout expires, the request fails and returns an error.
+- **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response.
+If no response is received before the timeout expires, the request fails and returns an error.
+- **`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))**: The number of shard copies that must be active before proceeding with the operation.
+Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`).
+
+## client.indices.create [_indices.create]
+Create an index.
+You can use the create index API to add a new index to an Elasticsearch cluster.
+When creating an index, you can specify the following:
-### Arguments [_arguments_149]
-
-* **Request (object):**
-
- * **`index` (Optional, string | string)**: A single target to search. If the target is an index alias, it must resolve to a single index.
- * **`searches` (Optional, { allow_no_indices, expand_wildcards, ignore_unavailable, index, preference, request_cache, routing, search_type, ccs_minimize_roundtrips, allow_partial_search_results, ignore_throttled } | { aggregations, collapse, query, explain, ext, stored_fields, docvalue_fields, knn, from, highlight, indices_boost, min_score, post_filter, profile, rescore, script_fields, search_after, size, sort, _source, fields, terminate_after, stats, timeout, track_scores, track_total_hits, version, runtime_mappings, seq_no_primary_term, pit, suggest }[])** - * **`allow_no_indices` (Optional, boolean)**: If false, the request returns an error if any wildcard expression, index alias, or _all value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting foo*,bar* returns an error if an index starts with foo but no index starts with bar. - * **`ccs_minimize_roundtrips` (Optional, boolean)**: If true, network roundtrips between the coordinating node and remote clusters are minimized for cross-cluster search requests. - * **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Type of index that wildcard expressions can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. - * **`ignore_throttled` (Optional, boolean)**: If true, concrete, expanded or aliased indices are ignored when frozen. - * **`ignore_unavailable` (Optional, boolean)**: If true, missing or closed indices are not included in the response. - * **`max_concurrent_searches` (Optional, number)**: Maximum number of concurrent searches the multi search API can execute. - * **`max_concurrent_shard_requests` (Optional, number)**: Maximum number of concurrent shard requests that each sub-search request executes per node. - * **`pre_filter_shard_size` (Optional, number)**: Defines a threshold that enforces a pre-filter roundtrip to prefilter search shards based on query rewriting if the number of shards the search request expands to exceeds the threshold. This filter roundtrip can limit the number of shards significantly if for instance a shard can not match any documents based on its rewrite method i.e., if date filters are mandatory to match but the shard bounds and the query are disjoint. - * **`search_type` (Optional, Enum("query_then_fetch" | "dfs_query_then_fetch"))**: Indicates whether global term and document frequencies should be used when scoring returned documents. - * **`rest_total_hits_as_int` (Optional, boolean)**: If true, hits.total are returned as an integer in the response. Defaults to false, which returns an object. - * **`typed_keys` (Optional, boolean)**: Specifies whether aggregation and suggester names should be prefixed by their respective types in the response. - * **`wait_for_checkpoints` (Optional, number[])**: A comma separated list of checkpoints. When configured, the search API will only be executed on a shard after the relevant checkpoint has become visible for search. Defaults to an empty list which will cause Elasticsearch to immediately execute the search. - * **`allow_partial_search_results` (Optional, boolean)**: If true, returns partial results if there are shard request timeouts or [shard failures](docs-content://deploy-manage/distributed-architecture/reading-and-writing-documents.md#shard-failures). If false, returns an error with no partial results. 
Defaults to the configured cluster setting `search.default_allow_partial_results` which is true by default.
+* Settings for the index.
+* Mappings for fields in the index.
+* Index aliases
+**Wait for active shards**
+By default, index creation will only return a response to the client when the primary copies of each shard have been started, or the request times out.
+The index creation response will indicate what happened.
+For example, `acknowledged` indicates whether the index was successfully created in the cluster, while `shards_acknowledged` indicates whether the requisite number of shard copies were started for each shard in the index before timing out.
+Note that it is still possible for either `acknowledged` or `shards_acknowledged` to be `false`, but for the index creation to be successful.
+These values simply indicate whether the operation completed before the timeout.
+If `acknowledged` is false, the request timed out before the cluster state was updated with the newly created index, but it probably will be created sometime soon.
+If `shards_acknowledged` is false, then the request timed out before the requisite number of shards were started (by default just the primaries), even if the cluster state was successfully updated to reflect the newly created index (that is to say, `acknowledged` is `true`).
-### search [_search_3]
+You can change the default of only waiting for the primary shards to start through the index setting `index.write.wait_for_active_shards`.
+Note that changing this setting will also affect the `wait_for_active_shards` value on all subsequent write operations.
-Run a Fleet search. The purpose of the Fleet search API is to provide an API where the search will be run only after the provided checkpoint has been processed and is visible for searches inside of Elasticsearch.
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-indices-create)
```ts
-client.fleet.search({ index })
+client.indices.create({ index })
```
+### Arguments [_arguments_indices.create]
-### Arguments [_arguments_150]
-
-* **Request (object):**
-
- * **`index` (string | string)**: A single target to search. If the target is an index alias, it must resolve to a single index.
- * **`aggregations` (Optional, Record)**
- * **`collapse` (Optional, { field, inner_hits, max_concurrent_group_searches, collapse })**
- * **`explain` (Optional, boolean)**: If true, returns detailed information about score computation as part of a hit.
- * **`ext` (Optional, Record)**: Configuration of search extensions defined by Elasticsearch plugins.
- * **`from` (Optional, number)**: Starting document offset. By default, you cannot page through more than 10,000 hits using the from and size parameters. To page through more hits, use the search_after parameter.
- * **`highlight` (Optional, { encoder, fields })**
- * **`track_total_hits` (Optional, boolean | number)**: Number of hits matching the query to count accurately. If true, the exact number of hits is returned at the cost of some performance. If false, the response does not include the total number of hits matching the query. Defaults to 10,000 hits.
- * **`indices_boost` (Optional, Record[])**: Boosts the _score of documents from specified indices.
- * **`docvalue_fields` (Optional, { field, format, include_unmapped }[])**: Array of wildcard (*) patterns. The request returns doc values for field names matching these patterns in the hits.fields property of the response.
- * **`min_score` (Optional, number)**: Minimum _score for matching documents. Documents with a lower _score are not included in the search results. - * **`post_filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })** - * **`profile` (Optional, boolean)** - * **`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })**: Defines the search definition using the Query DSL. - * **`rescore` (Optional, { window_size, query, learning_to_rank } | { window_size, query, learning_to_rank }[])** - * **`script_fields` (Optional, Record)**: Retrieve a script evaluation (based on different fields) for each hit. - * **`search_after` (Optional, number | number | string | boolean | null | User-defined value[])** - * **`size` (Optional, number)**: The number of hits to return. By default, you cannot page through more than 10,000 hits using the from and size parameters. To page through more hits, use the search_after parameter. - * **`slice` (Optional, { field, id, max })** - * **`sort` (Optional, string | { _score, _doc, _geo_distance, _script } | string | { _score, _doc, _geo_distance, _script }[])** - * **`_source` (Optional, boolean | { excludes, includes })**: Indicates which source fields are returned for matching documents. These fields are returned in the hits._source property of the search response. - * **`fields` (Optional, { field, format, include_unmapped }[])**: Array of wildcard (*) patterns. The request returns values for field names matching these patterns in the hits.fields property of the response. - * **`suggest` (Optional, { text })** - * **`terminate_after` (Optional, number)**: Maximum number of documents to collect for each shard. If a query reaches this limit, Elasticsearch terminates the query early. Elasticsearch collects documents before sorting. Defaults to 0, which does not terminate query execution early. - * **`timeout` (Optional, string)**: Specifies the period of time to wait for a response from each shard. If no response is received before the timeout expires, the request fails and returns an error. Defaults to no timeout. - * **`track_scores` (Optional, boolean)**: If true, calculate and return document scores, even if the scores are not used for sorting. 
- * **`version` (Optional, boolean)**: If true, returns document version as part of a hit. - * **`seq_no_primary_term` (Optional, boolean)**: If true, returns sequence number and primary term of the last modification of each hit. See Optimistic concurrency control. - * **`stored_fields` (Optional, string | string[])**: List of stored fields to return as part of a hit. If no fields are specified, no stored fields are included in the response. If this field is specified, the _source parameter defaults to false. You can pass _source: true to return both source fields and stored fields in the search response. - * **`pit` (Optional, { id, keep_alive })**: Limits the search to a point in time (PIT). If you provide a PIT, you cannot specify an in the request path. - * **`runtime_mappings` (Optional, Record)**: Defines one or more runtime fields in the search request. These fields take precedence over mapped fields with the same name. - * **`stats` (Optional, string[])**: Stats groups to associate with the search. Each group maintains a statistics aggregation for its associated searches. You can retrieve these stats using the indices stats API. - * **`allow_no_indices` (Optional, boolean)** - * **`analyzer` (Optional, string)** - * **`analyze_wildcard` (Optional, boolean)** - * **`batched_reduce_size` (Optional, number)** - * **`ccs_minimize_roundtrips` (Optional, boolean)** - * **`default_operator` (Optional, Enum("and" | "or"))** - * **`df` (Optional, string)** - * **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])** - * **`ignore_throttled` (Optional, boolean)** - * **`ignore_unavailable` (Optional, boolean)** - * **`lenient` (Optional, boolean)** - * **`max_concurrent_shard_requests` (Optional, number)** - * **`preference` (Optional, string)** - * **`pre_filter_shard_size` (Optional, number)** - * **`request_cache` (Optional, boolean)** - * **`routing` (Optional, string)** - * **`scroll` (Optional, string | -1 | 0)** - * **`search_type` (Optional, Enum("query_then_fetch" | "dfs_query_then_fetch"))** - * **`suggest_field` (Optional, string)**: Specifies which field to use for suggestions. - * **`suggest_mode` (Optional, Enum("missing" | "popular" | "always"))** - * **`suggest_size` (Optional, number)** - * **`suggest_text` (Optional, string)**: The source text for which the suggestions should be returned. - * **`typed_keys` (Optional, boolean)** - * **`rest_total_hits_as_int` (Optional, boolean)** - * **`_source_excludes` (Optional, string | string[])** - * **`_source_includes` (Optional, string | string[])** - * **`q` (Optional, string)** - * **`wait_for_checkpoints` (Optional, number[])**: A comma separated list of checkpoints. When configured, the search API will only be executed on a shard after the relevant checkpoint has become visible for search. Defaults to an empty list which will cause Elasticsearch to immediately execute the search. - * **`allow_partial_search_results` (Optional, boolean)**: If true, returns partial results if there are shard request timeouts or [shard failures](docs-content://deploy-manage/distributed-architecture/reading-and-writing-documents.md#shard-failures). If false, returns an error with no partial results. Defaults to the configured cluster setting `search.default_allow_partial_results` which is true by default. - - - -## graph [_graph] - - -### explore [_explore] - -Explore graph analytics. 
Extract and summarize information about the documents and terms in an Elasticsearch data stream or index. The easiest way to understand the behavior of this API is to use the Graph UI to explore connections. An initial request to the `_explore` API contains a seed query that identifies the documents of interest and specifies the fields that define the vertices and connections you want to include in the graph. Subsequent requests enable you to spider out from one more vertices of interest. You can exclude vertices that have already been returned. - -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-graph) +#### Request (object) [_request_indices.create] +- **`index` (string)**: Name of the index you wish to create. +- **`aliases` (Optional, Record)**: Aliases for the index. +- **`mappings` (Optional, { all_field, date_detection, dynamic, dynamic_date_formats, dynamic_templates, _field_names, index_field, _meta, numeric_detection, properties, _routing, _size, _source, runtime, enabled, subobjects, _data_stream_timestamp })**: Mapping for fields in the index. If specified, this mapping can include: +- Field names +- Field data types +- Mapping parameters +- **`settings` (Optional, { index, mode, routing_path, soft_deletes, sort, number_of_shards, number_of_replicas, number_of_routing_shards, check_on_startup, codec, routing_partition_size, load_fixed_bitset_filters_eagerly, hidden, auto_expand_replicas, merge, search, refresh_interval, max_result_window, max_inner_result_window, max_rescore_window, max_docvalue_fields_search, max_script_fields, max_ngram_diff, max_shingle_diff, blocks, max_refresh_listeners, analyze, highlight, max_terms_count, max_regex_length, routing, gc_deletes, default_pipeline, final_pipeline, lifecycle, provided_name, creation_date, creation_date_string, uuid, version, verified_before_close, format, max_slices_per_scroll, translog, query_string, priority, top_metrics_max_size, analysis, settings, time_series, queries, similarity, mapping, indexing.slowlog, indexing_pressure, store })**: Configuration options for the index. +- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. +- **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. +If no response is received before the timeout expires, the request fails and returns an error. +- **`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))**: The number of shard copies that must be active before proceeding with the operation. +Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). + +## client.indices.createDataStream [_indices.create_data_stream] +Create a data stream. + +You must have a matching index template with data stream enabled. 
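+
+As a hedged end-to-end sketch (the template and stream names below are only illustrations):
+
+```ts
+// A matching index template with `data_stream` enabled has to exist first
+await client.indices.putIndexTemplate({
+  name: 'logs-app-template',
+  index_patterns: ['logs-app-*'],
+  data_stream: {}
+})
+
+// Then the data stream itself can be created
+await client.indices.createDataStream({ name: 'logs-app-default' })
+```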
+ +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-indices-create-data-stream) ```ts -client.graph.explore({ index }) +client.indices.createDataStream({ name }) ``` +### Arguments [_arguments_indices.create_data_stream] -### Arguments [_arguments_151] +#### Request (object) [_request_indices.create_data_stream] +- **`name` (string)**: Name of the data stream, which must meet the following criteria: +Lowercase only; +Cannot include `\`, `/`, `*`, `?`, `"`, `<`, `>`, `|`, `,`, `#`, `:`, or a space character; +Cannot start with `-`, `_`, `+`, or `.ds-`; +Cannot be `.` or `..`; +Cannot be longer than 255 bytes. Multi-byte characters count towards this limit faster. +- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. +- **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. -* **Request (object):** +## client.indices.createFrom [_indices.create_from] +Create an index from a source index. - * **`index` (string | string[])**: Name of the index. - * **`connections` (Optional, { connections, query, vertices })**: Specifies or more fields from which you want to extract terms that are associated with the specified vertices. - * **`controls` (Optional, { sample_diversity, sample_size, timeout, use_significance })**: Direct the Graph API how to build the graph. - * **`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })**: A seed query that identifies the documents of interest. Can be any valid Elasticsearch query. - * **`vertices` (Optional, { exclude, field, include, min_doc_count, shard_min_doc_count, size }[])**: Specifies one or more fields that contain the terms you want to include in the graph as vertices. - * **`routing` (Optional, string)**: Custom value used to route operations to a specific shard. - * **`timeout` (Optional, string | -1 | 0)**: Specifies the period of time to wait for a response from each shard. If no response is received before the timeout expires, the request fails and returns an error. Defaults to no timeout. +Copy the mappings and settings from the source index to a destination index while allowing request settings and mappings to override the source values. 
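+
+A minimal sketch (both index names are placeholders; the optional `create_from` overrides are omitted):
+
+```ts
+// Create `my-new-index` with the mappings and settings copied from `my-source-index`
+await client.indices.createFrom({
+  source: 'my-source-index',
+  dest: 'my-new-index'
+})
+```
+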
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-indices-create-from) +```ts +client.indices.createFrom({ source, dest }) +``` -## ilm [_ilm] +### Arguments [_arguments_indices.create_from] +#### Request (object) [_request_indices.create_from] +- **`source` (string)**: The source index or data stream name +- **`dest` (string)**: The destination index or data stream name +- **`create_from` (Optional, { mappings_override, settings_override, remove_index_blocks })** -### delete_lifecycle [_delete_lifecycle] +## client.indices.dataStreamsStats [_indices.data_streams_stats] +Get data stream stats. -Delete a lifecycle policy. You cannot delete policies that are currently in use. If the policy is being used to manage any indices, the request fails and returns an error. +Get statistics for one or more data streams. -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-delete-lifecycle) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-indices-data-streams-stats-1) ```ts -client.ilm.deleteLifecycle({ policy }) +client.indices.dataStreamsStats({ ... }) ``` +### Arguments [_arguments_indices.data_streams_stats] -### Arguments [_arguments_152] +#### Request (object) [_request_indices.data_streams_stats] +- **`name` (Optional, string)**: List of data streams used to limit the request. +Wildcard expressions (`*`) are supported. +To target all data streams in a cluster, omit this parameter or use `*`. +- **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Type of data stream that wildcard patterns can match. +Supports a list of values, such as `open,hidden`. -* **Request (object):** +## client.indices.delete [_indices.delete] +Delete indices. +Deleting an index deletes its documents, shards, and metadata. +It does not delete related Kibana components, such as data views, visualizations, or dashboards. - * **`policy` (string)**: Identifier for the policy. - * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. - * **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. +You cannot delete the current write index of a data stream. +To delete the index, you must roll over the data stream so a new write index is created. +You can then use the delete index API to delete the previous write index. +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-indices-delete) +```ts +client.indices.delete({ index }) +``` -### explain_lifecycle [_explain_lifecycle] +### Arguments [_arguments_indices.delete] -Explain the lifecycle state. Get the current lifecycle status for one or more indices. For data streams, the API retrieves the current lifecycle status for the stream’s backing indices. +#### Request (object) [_request_indices.delete] +- **`index` (string | string[])**: List of indices to delete. +You cannot specify index aliases. +By default, this parameter does not support wildcards (`*`) or `_all`. +To use wildcards or `_all`, set the `action.destructive_requires_name` cluster setting to `false`. 
+- **`allow_no_indices` (Optional, boolean)**: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. +This behavior applies even if the request targets other open indices. +- **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Type of index that wildcard patterns can match. +If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. +Supports a list of values, such as `open,hidden`. +Valid values are: `all`, `open`, `closed`, `hidden`, `none`. +- **`ignore_unavailable` (Optional, boolean)**: If `false`, the request returns an error if it targets a missing or closed index. +- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. +- **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. +If no response is received before the timeout expires, the request fails and returns an error. -The response indicates when the index entered each lifecycle state, provides the definition of the running phase, and information about any failures. +## client.indices.deleteAlias [_indices.delete_alias] +Delete an alias. +Removes a data stream or index from an alias. -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-explain-lifecycle) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-indices-delete-alias) ```ts -client.ilm.explainLifecycle({ index }) +client.indices.deleteAlias({ index, name }) ``` +### Arguments [_arguments_indices.delete_alias] -### Arguments [_arguments_153] +#### Request (object) [_request_indices.delete_alias] +- **`index` (string | string[])**: List of data streams or indices used to limit the request. +Supports wildcards (`*`). +- **`name` (string | string[])**: List of aliases to remove. +Supports wildcards (`*`). To remove all aliases, use `*` or `_all`. +- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. +- **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. +If no response is received before the timeout expires, the request fails and returns an error. -* **Request (object):** +## client.indices.deleteDataLifecycle [_indices.delete_data_lifecycle] +Delete data stream lifecycles. +Removes the data stream lifecycle from a data stream, rendering it not managed by the data stream lifecycle. - * **`index` (string)**: List of data streams, indices, and aliases to target. Supports wildcards (`*`). To target all data streams and indices, use `*` or `_all`. - * **`only_errors` (Optional, boolean)**: Filters the returned indices to only indices that are managed by ILM and are in an error state, either due to an encountering an error while executing the policy, or attempting to use a policy that does not exist. - * **`only_managed` (Optional, boolean)**: Filters the returned indices to only indices that are managed by ILM. - * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. 
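+
+A minimal sketch (the data stream name is a placeholder; `*` would target every data stream):
+
+```ts
+// Stop the data stream lifecycle from managing this stream
+await client.indices.deleteDataLifecycle({ name: 'my-data-stream' })
+```
+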
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-indices-delete-data-lifecycle)
+```ts
+client.indices.deleteDataLifecycle({ name })
+```
+### Arguments [_arguments_indices.delete_data_lifecycle]
-### get_lifecycle [_get_lifecycle]
+#### Request (object) [_request_indices.delete_data_lifecycle]
+- **`name` (string | string[])**: A list of data streams of which the data stream lifecycle will be deleted; use `*` to get all data streams
+- **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Whether wildcard expressions should get expanded to open or closed indices (default: open)
+- **`master_timeout` (Optional, string | -1 | 0)**: Specify timeout for connection to master
+- **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.
-Get lifecycle policies.
+## client.indices.deleteDataStream [_indices.delete_data_stream]
+Delete data streams.
+Deletes one or more data streams and their backing indices.
-[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-get-lifecycle)
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-indices-delete-data-stream)
```ts
-client.ilm.getLifecycle({ ... })
+client.indices.deleteDataStream({ name })
```
+### Arguments [_arguments_indices.delete_data_stream]
-### Arguments [_arguments_154]
+#### Request (object) [_request_indices.delete_data_stream]
+- **`name` (string | string[])**: List of data streams to delete. Wildcard (`*`) expressions are supported.
+- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.
+- **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Type of data stream that wildcard patterns can match. Supports a list of values, such as `open,hidden`.
-* **Request (object):**
+## client.indices.deleteIndexTemplate [_indices.delete_index_template]
+Delete an index template.
+The provided `name` may contain multiple template names separated by a comma. If multiple template
+names are specified then there is no wildcard support and the provided names should match completely with
+existing templates.
- * **`policy` (Optional, string)**: Identifier for the policy.
- * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.
- * **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-indices-delete-index-template)
+```ts
+client.indices.deleteIndexTemplate({ name })
+```
+### Arguments [_arguments_indices.delete_index_template]
-### get_status [_get_status_2]
+#### Request (object) [_request_indices.delete_index_template]
+- **`name` (string | string[])**: List of index template names used to limit the request. Wildcard (*) expressions are supported.
+- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node.
If no response is received before the timeout expires, the request fails and returns an error. +- **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. -Get the ILM status. Get the current index lifecycle management status. +## client.indices.deleteTemplate [_indices.delete_template] +Delete a legacy index template. -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-get-status) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-indices-delete-template) ```ts -client.ilm.getStatus() +client.indices.deleteTemplate({ name }) ``` +### Arguments [_arguments_indices.delete_template] -### migrate_to_data_tiers [_migrate_to_data_tiers] - -Migrate to data tiers routing. Switch the indices, ILM policies, and legacy, composable, and component templates from using custom node attributes and attribute-based allocation filters to using data tiers. Optionally, delete one legacy index template. Using node roles enables ILM to automatically move the indices between data tiers. - -Migrating away from custom node attributes routing can be manually performed. This API provides an automated way of performing three out of the four manual steps listed in the migration guide: +#### Request (object) [_request_indices.delete_template] +- **`name` (string)**: The name of the legacy index template to delete. +Wildcard (`*`) expressions are supported. +- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. +- **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. +If no response is received before the timeout expires, the request fails and returns an error. -1. Stop setting the custom hot attribute on new indices. -2. Remove custom allocation settings from existing ILM policies. -3. Replace custom allocation settings from existing indices with the corresponding tier preference. +## client.indices.diskUsage [_indices.disk_usage] +Analyze the index disk usage. +Analyze the disk usage of each field of an index or data stream. +This API might not support indices created in previous Elasticsearch versions. +The result of a small index can be inaccurate as some parts of an index might not be analyzed by the API. -ILM must be stopped before performing the migration. Use the stop ILM and get ILM status APIs to wait until the reported operation mode is `STOPPED`. +NOTE: The total size of fields of the analyzed shards of the index in the response is usually smaller than the index `store_size` value because some small metadata files are ignored and some parts of data files might not be scanned by the API. +Since stored fields are stored together in a compressed format, the sizes of stored fields are also estimates and can be inaccurate. +The stored size of the `_id` field is likely underestimated while the `_source` field is overestimated. -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-migrate-to-data-tiers) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-indices-disk-usage) ```ts -client.ilm.migrateToDataTiers({ ... 
}) +client.indices.diskUsage({ index }) ``` +### Arguments [_arguments_indices.disk_usage] -### Arguments [_arguments_155] - -* **Request (object):** +#### Request (object) [_request_indices.disk_usage] +- **`index` (string | string[])**: List of data streams, indices, and aliases used to limit the request. +It’s recommended to execute this API with a single index (or the latest backing index of a data stream) as the API consumes resources significantly. +- **`allow_no_indices` (Optional, boolean)**: If false, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. +This behavior applies even if the request targets other open indices. +For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. +- **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Type of index that wildcard patterns can match. +If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. +Supports a list of values, such as `open,hidden`. +- **`flush` (Optional, boolean)**: If `true`, the API performs a flush before analysis. +If `false`, the response may not include uncommitted data. +- **`ignore_unavailable` (Optional, boolean)**: If `true`, missing or closed indices are not included in the response. +- **`run_expensive_tasks` (Optional, boolean)**: Analyzing field disk usage is resource-intensive. +To use the API, this parameter must be set to `true`. - * **`legacy_template_to_delete` (Optional, string)** - * **`node_attribute` (Optional, string)** - * **`dry_run` (Optional, boolean)**: If true, simulates the migration from node attributes based allocation filters to data tiers, but does not perform the migration. This provides a way to retrieve the indices and ILM policies that need to be migrated. +## client.indices.downsample [_indices.downsample] +Downsample an index. +Aggregate a time series (TSDS) index and store pre-computed statistical summaries (`min`, `max`, `sum`, `value_count` and `avg`) for each metric field grouped by a configured time interval. +For example, a TSDS index that contains metrics sampled every 10 seconds can be downsampled to an hourly index. +All documents within an hour interval are summarized and stored as a single document in the downsample index. +NOTE: Only indices in a time series data stream are supported. +Neither field nor document level security can be defined on the source index. +The source index must be read only (`index.blocks.write: true`). +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-indices-downsample) -### move_to_step [_move_to_step] - -Move to a lifecycle step. Manually move an index into a specific step in the lifecycle policy and run that step. - -::::{warning} -This operation can result in the loss of data. Manually moving an index into a specific step runs that step even if it has already been performed. This is a potentially destructive action and this should be considered an expert level API. -:::: +```ts +client.indices.downsample({ index, target_index }) +``` +### Arguments [_arguments_indices.downsample] -You must specify both the current step and the step to be executed in the body of the request. 
The request will fail if the current step does not match the step currently running for the index This is to prevent the index from being moved from an unexpected step into the next step. +#### Request (object) [_request_indices.downsample] +- **`index` (string)**: Name of the time series index to downsample. +- **`target_index` (string)**: Name of the index to create. +- **`config` (Optional, { fixed_interval })** -When specifying the target (`next_step`) to which the index will be moved, either the name or both the action and name fields are optional. If only the phase is specified, the index will move to the first step of the first action in the target phase. If the phase and action are specified, the index will move to the first step of the specified action in the specified phase. Only actions specified in the ILM policy are considered valid. An index cannot move to a step that is not part of its policy. +## client.indices.exists [_indices.exists] +Check indices. +Check if one or more indices, index aliases, or data streams exist. -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-move-to-step) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-indices-exists) ```ts -client.ilm.moveToStep({ index, current_step, next_step }) +client.indices.exists({ index }) ``` +### Arguments [_arguments_indices.exists] -### Arguments [_arguments_156] +#### Request (object) [_request_indices.exists] +- **`index` (string | string[])**: List of data streams, indices, and aliases. Supports wildcards (`*`). +- **`allow_no_indices` (Optional, boolean)**: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. +This behavior applies even if the request targets other open indices. +- **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Type of index that wildcard patterns can match. +If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. +Supports a list of values, such as `open,hidden`. +Valid values are: `all`, `open`, `closed`, `hidden`, `none`. +- **`flat_settings` (Optional, boolean)**: If `true`, returns settings in flat format. +- **`ignore_unavailable` (Optional, boolean)**: If `false`, the request returns an error if it targets a missing or closed index. +- **`include_defaults` (Optional, boolean)**: If `true`, return all default settings in the response. +- **`local` (Optional, boolean)**: If `true`, the request retrieves information from the local node only. -* **Request (object):** +## client.indices.existsAlias [_indices.exists_alias] +Check aliases. - * **`index` (string)**: The name of the index whose lifecycle step is to change - * **`current_step` ({ action, name, phase })**: The step that the index is expected to be in. - * **`next_step` ({ action, name, phase })**: The step that you want to run. +Check if one or more data stream or index aliases exist. +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-indices-exists-alias) +```ts +client.indices.existsAlias({ name }) +``` -### put_lifecycle [_put_lifecycle] +### Arguments [_arguments_indices.exists_alias] -Create or update a lifecycle policy. If the specified policy exists, it is replaced and the policy version is incremented. 
+#### Request (object) [_request_indices.exists_alias] +- **`name` (string | string[])**: List of aliases to check. Supports wildcards (`*`). +- **`index` (Optional, string | string[])**: List of data streams or indices used to limit the request. Supports wildcards (`*`). +To target all data streams and indices, omit this parameter or use `*` or `_all`. +- **`allow_no_indices` (Optional, boolean)**: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. +This behavior applies even if the request targets other open indices. +- **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Type of index that wildcard patterns can match. +If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. +Supports a list of values, such as `open,hidden`. +Valid values are: `all`, `open`, `closed`, `hidden`, `none`. +- **`ignore_unavailable` (Optional, boolean)**: If `false`, requests that include a missing data stream or index in the target indices or data streams return an error. +- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. -::::{note} -Only the latest version of the policy is stored, you cannot revert to previous versions. -:::: +## client.indices.existsIndexTemplate [_indices.exists_index_template] +Check index templates. +Check whether index templates exist. -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-put-lifecycle) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-indices-exists-index-template) ```ts -client.ilm.putLifecycle({ policy }) +client.indices.existsIndexTemplate({ name }) ``` +### Arguments [_arguments_indices.exists_index_template] -### Arguments [_arguments_157] +#### Request (object) [_request_indices.exists_index_template] +- **`name` (string)**: List of index template names used to limit the request. Wildcard (*) expressions are supported. +- **`local` (Optional, boolean)**: If true, the request retrieves information from the local node only. Defaults to false, which means information is retrieved from the master node. +- **`flat_settings` (Optional, boolean)**: If true, returns settings in flat format. +- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. -* **Request (object):** +## client.indices.existsTemplate [_indices.exists_template] +Check existence of index templates. +Get information about whether index templates exist. +Index templates define settings, mappings, and aliases that can be applied automatically to new indices. - * **`policy` (string)**: Identifier for the policy. - * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. - * **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. 
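+
+A brief sketch (the template name is a placeholder; the call resolves to a boolean):
+
+```ts
+// true if a legacy template with this name exists, false otherwise
+const exists = await client.indices.existsTemplate({ name: 'template_1' })
+```
+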
+IMPORTANT: This documentation is about legacy index templates, which are deprecated and will be replaced by the composable templates introduced in Elasticsearch 7.8. +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-indices-exists-template) + +```ts +client.indices.existsTemplate({ name }) +``` +### Arguments [_arguments_indices.exists_template] -### remove_policy [_remove_policy] +#### Request (object) [_request_indices.exists_template] +- **`name` (string | string[])**: A list of index template names used to limit the request. +Wildcard (`*`) expressions are supported. +- **`flat_settings` (Optional, boolean)**: Indicates whether to use a flat format for the response. +- **`local` (Optional, boolean)**: Indicates whether to get information from the local node only. +- **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for the master node. +If the master node is not available before the timeout expires, the request fails and returns an error. +To indicate that the request should never timeout, set it to `-1`. -Remove policies from an index. Remove the assigned lifecycle policies from an index or a data stream’s backing indices. It also stops managing the indices. +## client.indices.explainDataLifecycle [_indices.explain_data_lifecycle] +Get the status for a data stream lifecycle. +Get information about an index or data stream's current data stream lifecycle status, such as time since index creation, time since rollover, the lifecycle configuration managing the index, or any errors encountered during lifecycle execution. -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-remove-policy) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-indices-explain-data-lifecycle) ```ts -client.ilm.removePolicy({ index }) +client.indices.explainDataLifecycle({ index }) ``` +### Arguments [_arguments_indices.explain_data_lifecycle] -### Arguments [_arguments_158] - -* **Request (object):** - - * **`index` (string)**: The name of the index to remove policy on +#### Request (object) [_request_indices.explain_data_lifecycle] +- **`index` (string | string[])**: The name of the index to explain +- **`include_defaults` (Optional, boolean)**: indicates if the API should return the default values the system uses for the index's lifecycle +- **`master_timeout` (Optional, string | -1 | 0)**: Specify timeout for connection to master +## client.indices.fieldUsageStats [_indices.field_usage_stats] +Get field usage stats. +Get field usage information for each shard and field of an index. +Field usage statistics are automatically captured when queries are running on a cluster. +A shard-level search request that accesses a given field, even if multiple times during that request, is counted as a single use. +The response body reports the per-shard usage count of the data structures that back the fields in the index. +A given request will increment each count by a maximum value of 1, even if the request accesses the same field multiple times. -### retry [_retry] - -Retry a policy. Retry running the lifecycle policy for an index that is in the ERROR step. The API sets the policy back to the step where the error occurred and runs the step. Use the explain lifecycle state API to determine whether an index is in the ERROR step. 
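+
+A minimal sketch (the index and field names are placeholders):
+
+```ts
+// Per-shard usage counts, restricted to the listed fields
+const usage = await client.indices.fieldUsageStats({
+  index: 'my-index',
+  fields: ['@timestamp', 'message']
+})
+```
+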
- -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-retry) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-indices-field-usage-stats) ```ts -client.ilm.retry({ index }) +client.indices.fieldUsageStats({ index }) ``` +### Arguments [_arguments_indices.field_usage_stats] -### Arguments [_arguments_159] +#### Request (object) [_request_indices.field_usage_stats] +- **`index` (string | string[])**: List or wildcard expression of index names used to limit the request. +- **`allow_no_indices` (Optional, boolean)**: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. +This behavior applies even if the request targets other open indices. +For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. +- **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Type of index that wildcard patterns can match. +If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. +Supports a list of values, such as `open,hidden`. +- **`ignore_unavailable` (Optional, boolean)**: If `true`, missing or closed indices are not included in the response. +- **`fields` (Optional, string | string[])**: List or wildcard expressions of fields to include in the statistics. -* **Request (object):** +## client.indices.flush [_indices.flush] +Flush data streams or indices. +Flushing a data stream or index is the process of making sure that any data that is currently only stored in the transaction log is also permanently stored in the Lucene index. +When restarting, Elasticsearch replays any unflushed operations from the transaction log into the Lucene index to bring it back into the state that it was in before the restart. +Elasticsearch automatically triggers flushes as needed, using heuristics that trade off the size of the unflushed transaction log against the cost of performing each flush. - * **`index` (string)**: The name of the indices (comma-separated) whose failed lifecycle step is to be retry +After each operation has been flushed it is permanently stored in the Lucene index. +This may mean that there is no need to maintain an additional copy of it in the transaction log. +The transaction log is made up of multiple files, called generations, and Elasticsearch will delete any generation files when they are no longer needed, freeing up disk space. +It is also possible to trigger a flush on one or more indices using the flush API, although it is rare for users to need to call this API directly. +If you call the flush API after indexing some documents then a successful response indicates that Elasticsearch has flushed all the documents that were indexed before the flush API was called. - -### start [_start] - -Start the ILM plugin. Start the index lifecycle management plugin if it is currently stopped. ILM is started automatically when the cluster is formed. Restarting ILM is necessary only when it has been stopped using the stop ILM API. - -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-start) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-indices-flush) ```ts -client.ilm.start({ ... }) +client.indices.flush({ ... 
}) ``` +### Arguments [_arguments_indices.flush] + +#### Request (object) [_request_indices.flush] +- **`index` (Optional, string | string[])**: List of data streams, indices, and aliases to flush. +Supports wildcards (`*`). +To flush all data streams and indices, omit this parameter or use `*` or `_all`. +- **`allow_no_indices` (Optional, boolean)**: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. +This behavior applies even if the request targets other open indices. +- **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Type of index that wildcard patterns can match. +If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. +Supports a list of values, such as `open,hidden`. +Valid values are: `all`, `open`, `closed`, `hidden`, `none`. +- **`force` (Optional, boolean)**: If `true`, the request forces a flush even if there are no changes to commit to the index. +- **`ignore_unavailable` (Optional, boolean)**: If `false`, the request returns an error if it targets a missing or closed index. +- **`wait_if_ongoing` (Optional, boolean)**: If `true`, the flush operation blocks until execution when another flush operation is running. +If `false`, Elasticsearch returns an error if you request a flush when another flush operation is running. + +## client.indices.forcemerge [_indices.forcemerge] +Force a merge. +Perform the force merge operation on the shards of one or more indices. +For data streams, the API forces a merge on the shards of the stream's backing indices. + +Merging reduces the number of segments in each shard by merging some of them together and also frees up the space used by deleted documents. +Merging normally happens automatically, but sometimes it is useful to trigger a merge manually. + +WARNING: We recommend force merging only a read-only index (meaning the index is no longer receiving writes). +When documents are updated or deleted, the old version is not immediately removed but instead soft-deleted and marked with a "tombstone". +These soft-deleted documents are automatically cleaned up during regular segment merges. +But force merge can cause very large (greater than 5 GB) segments to be produced, which are not eligible for regular merges. +So the number of soft-deleted documents can then grow rapidly, resulting in higher disk usage and worse search performance. +If you regularly force merge an index receiving writes, this can also make snapshots more expensive, since the new documents can't be backed up incrementally. -### Arguments [_arguments_160] +**Blocks during a force merge** -* **Request (object):** +Calls to this API block until the merge is complete (unless request contains `wait_for_completion=false`). +If the client connection is lost before completion then the force merge process will continue in the background. +Any new requests to force merge the same indices will also block until the ongoing force merge is complete. - * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. - * **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. 
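+
+For instance, a hedged sketch of a blocking call (the index name is a placeholder):
+
+```ts
+// Blocks until the merge finishes because wait_for_completion defaults to true
+await client.indices.forcemerge({
+  index: 'my-old-index',
+  max_num_segments: 1
+})
+```
+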
+**Running force merge asynchronously** +If the request contains `wait_for_completion=false`, Elasticsearch performs some preflight checks, launches the request, and returns a task you can use to get the status of the task. +However, you can not cancel this task as the force merge task is not cancelable. +Elasticsearch creates a record of this task as a document at `_tasks/`. +When you are done with a task, you should delete the task document so Elasticsearch can reclaim the space. +**Force merging multiple indices** -### stop [_stop] +You can force merge multiple indices with a single request by targeting: -Stop the ILM plugin. Halt all lifecycle management operations and stop the index lifecycle management plugin. This is useful when you are performing maintenance on the cluster and need to prevent ILM from performing any actions on your indices. +* One or more data streams that contain multiple backing indices +* Multiple indices +* One or more aliases +* All data streams and indices in a cluster -The API returns as soon as the stop request has been acknowledged, but the plugin might continue to run until in-progress operations complete and the plugin can be safely stopped. Use the get ILM status API to check whether ILM is running. +Each targeted shard is force-merged separately using the force_merge threadpool. +By default each node only has a single `force_merge` thread which means that the shards on that node are force-merged one at a time. +If you expand the `force_merge` threadpool on a node then it will force merge its shards in parallel -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-stop) +Force merge makes the storage for the shard being merged temporarily increase, as it may require free space up to triple its size in case `max_num_segments parameter` is set to `1`, to rewrite all segments into a new one. -```ts -client.ilm.stop({ ... }) -``` +**Data streams and time-based indices** +Force-merging is useful for managing a data stream's older backing indices and other time-based indices, particularly after a rollover. +In these cases, each index only receives indexing traffic for a certain period of time. +Once an index receive no more writes, its shards can be force-merged to a single segment. +This can be a good idea because single-segment shards can sometimes use simpler and more efficient data structures to perform searches. +For example: -### Arguments [_arguments_161] +``` +POST /.ds-my-data-stream-2099.03.07-000001/_forcemerge?max_num_segments=1 +``` -* **Request (object):** +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-indices-forcemerge) - * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. - * **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. +```ts +client.indices.forcemerge({ ... }) +``` +### Arguments [_arguments_indices.forcemerge] +#### Request (object) [_request_indices.forcemerge] +- **`index` (Optional, string | string[])**: A list of index names; use `_all` or empty string to perform the operation on all indices +- **`allow_no_indices` (Optional, boolean)**: Whether to ignore if a wildcard indices expression resolves into no concrete indices. 
(This includes `_all` string or when no indices have been specified) +- **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Whether to expand wildcard expression to concrete indices that are open, closed or both. +- **`flush` (Optional, boolean)**: Specify whether the index should be flushed after performing the operation (default: true) +- **`ignore_unavailable` (Optional, boolean)**: Whether specified concrete indices should be ignored when unavailable (missing or closed) +- **`max_num_segments` (Optional, number)**: The number of segments the index should be merged into (default: dynamic) +- **`only_expunge_deletes` (Optional, boolean)**: Specify whether the operation should only expunge deleted documents +- **`wait_for_completion` (Optional, boolean)**: Should the request wait until the force merge is completed. -## indices [_indices_2] +## client.indices.get [_indices.get] +Get index information. +Get information about one or more indices. For data streams, the API returns information about the +stream’s backing indices. +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-indices-get) -### add_block [_add_block] +```ts +client.indices.get({ index }) +``` -Add an index block. Limits the operations allowed on an index by blocking specific operation types. +### Arguments [_arguments_indices.get] + +#### Request (object) [_request_indices.get] +- **`index` (string | string[])**: List of data streams, indices, and index aliases used to limit the request. +Wildcard expressions (*) are supported. +- **`allow_no_indices` (Optional, boolean)**: If false, the request returns an error if any wildcard expression, index alias, or _all value targets only +missing or closed indices. This behavior applies even if the request targets other open indices. For example, +a request targeting foo*,bar* returns an error if an index starts with foo but no index starts with bar. +- **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Type of index that wildcard expressions can match. If the request can target data streams, this argument +determines whether wildcard expressions match hidden data streams. Supports a list of values, +such as open,hidden. +- **`flat_settings` (Optional, boolean)**: If true, returns settings in flat format. +- **`ignore_unavailable` (Optional, boolean)**: If false, requests that target a missing index return an error. +- **`include_defaults` (Optional, boolean)**: If true, return all default settings in the response. +- **`local` (Optional, boolean)**: If true, the request retrieves information from the local node only. Defaults to false, which means information is retrieved from the master node. +- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. +- **`features` (Optional, { name, description } | { name, description }[])**: Return only information on specified index features + +## client.indices.getAlias [_indices.get_alias] +Get aliases. +Retrieves information for one or more data stream or index aliases. 
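+As a quick illustration (the index name below is made up for the example):
+
+```ts
+// list every alias attached to a single index
+const aliases = await client.indices.getAlias({ index: 'my-index' })
+console.log(aliases)
+```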
-[Index block settings](elasticsearch://reference/elasticsearch/index-settings/index-block.md) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-indices-get-alias) ```ts -client.indices.addBlock({ index, block }) +client.indices.getAlias({ ... }) ``` +### Arguments [_arguments_indices.get_alias] -### Arguments [_arguments_162] +#### Request (object) [_request_indices.get_alias] +- **`name` (Optional, string | string[])**: List of aliases to retrieve. +Supports wildcards (`*`). +To retrieve all aliases, omit this parameter or use `*` or `_all`. +- **`index` (Optional, string | string[])**: List of data streams or indices used to limit the request. +Supports wildcards (`*`). +To target all data streams and indices, omit this parameter or use `*` or `_all`. +- **`allow_no_indices` (Optional, boolean)**: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. +This behavior applies even if the request targets other open indices. +- **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Type of index that wildcard patterns can match. +If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. +Supports a list of values, such as `open,hidden`. +Valid values are: `all`, `open`, `closed`, `hidden`, `none`. +- **`ignore_unavailable` (Optional, boolean)**: If `false`, the request returns an error if it targets a missing or closed index. +- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. -* **Request (object):** +## client.indices.getDataLifecycle [_indices.get_data_lifecycle] +Get data stream lifecycles. - * **`index` (string)**: A comma separated list of indices to add a block to - * **`block` (Enum("metadata" | "read" | "read_only" | "write"))**: The block to add (one of read, write, read_only or metadata) - * **`allow_no_indices` (Optional, boolean)**: Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified) - * **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Whether to expand wildcard expression to concrete indices that are open, closed or both. - * **`ignore_unavailable` (Optional, boolean)**: Whether specified concrete indices should be ignored when unavailable (missing or closed) - * **`master_timeout` (Optional, string | -1 | 0)**: Specify timeout for connection to master - * **`timeout` (Optional, string | -1 | 0)**: Explicit operation timeout +Get the data stream lifecycle configuration of one or more data streams. +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-indices-get-data-lifecycle) +```ts +client.indices.getDataLifecycle({ name }) +``` -### analyze [_analyze] +### Arguments [_arguments_indices.get_data_lifecycle] -Get tokens from text analysis. The analyze API performs analysis on a text string and returns the resulting tokens. +#### Request (object) [_request_indices.get_data_lifecycle] +- **`name` (string | string[])**: List of data streams to limit the request. +Supports wildcards (`*`). 
+To target all data streams, omit this parameter or use `*` or `_all`. +- **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Type of data stream that wildcard patterns can match. +Supports a list of values, such as `open,hidden`. +Valid values are: `all`, `open`, `closed`, `hidden`, `none`. +- **`include_defaults` (Optional, boolean)**: If `true`, return all default settings in the response. +- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. -Generating excessive amount of tokens may cause a node to run out of memory. The `index.analyze.max_token_count` setting enables you to limit the number of tokens that can be produced. If more than this limit of tokens gets generated, an error occurs. The `_analyze` endpoint without a specified index will always use `10000` as its limit. +## client.indices.getDataLifecycleStats [_indices.get_data_lifecycle_stats] +Get data stream lifecycle stats. +Get statistics about the data streams that are managed by a data stream lifecycle. -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-analyze) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-indices-get-data-lifecycle-stats) ```ts -client.indices.analyze({ ... }) +client.indices.getDataLifecycleStats() ``` -### Arguments [_arguments_163] +## client.indices.getDataStream [_indices.get_data_stream] +Get data streams. + +Get information about one or more data streams. -* **Request (object):** +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-indices-get-data-stream) - * **`index` (Optional, string)**: Index used to derive the analyzer. If specified, the `analyzer` or field parameter overrides this value. If no index is specified or the index does not have a default analyzer, the analyze API uses the standard analyzer. - * **`analyzer` (Optional, string)**: The name of the analyzer that should be applied to the provided `text`. This could be a built-in analyzer, or an analyzer that’s been configured in the index. - * **`attributes` (Optional, string[])**: Array of token attributes used to filter the output of the `explain` parameter. - * **`char_filter` (Optional, string | { type, escaped_tags } | { type, mappings, mappings_path } | { type, flags, pattern, replacement } | { type, mode, name } | { type, normalize_kana, normalize_kanji }[])**: Array of character filters used to preprocess characters before the tokenizer. - * **`explain` (Optional, boolean)**: If `true`, the response includes token attributes and additional details. - * **`field` (Optional, string)**: Field used to derive the analyzer. To use this parameter, you must specify an index. If specified, the `analyzer` parameter overrides this value. 
- * **`filter` (Optional, string | { type, preserve_original } | { type, common_words, common_words_path, ignore_case, query_mode } | { type, filter, script } | { type, delimiter, encoding } | { type, max_gram, min_gram, side, preserve_original } | { type, articles, articles_path, articles_case } | { type, max_output_size, separator } | { type, dedup, dictionary, locale, longest_only } | { type } | { type, mode, types } | { type, keep_words, keep_words_case, keep_words_path } | { type, ignore_case, keywords, keywords_path, keywords_pattern } | { type } | { type, max, min } | { type, consume_all_tokens, max_token_count } | { type, language } | { type, filters, preserve_original } | { type, max_gram, min_gram, preserve_original } | { type, stoptags } | { type, patterns, preserve_original } | { type, all, flags, pattern, replacement } | { type } | { type, script } | { type } | { type } | { type, filler_token, max_shingle_size, min_shingle_size, output_unigrams, output_unigrams_if_no_shingles, token_separator } | { type, language } | { type, rules, rules_path } | { type, language } | { type, ignore_case, remove_trailing, stopwords, stopwords_path } | { type, expand, format, lenient, synonyms, synonyms_path, synonyms_set, tokenizer, updateable } | { type, expand, format, lenient, synonyms, synonyms_path, synonyms_set, tokenizer, updateable } | { type } | { type, length } | { type, only_on_same_position } | { type } | { type, adjust_offsets, catenate_all, catenate_numbers, catenate_words, generate_number_parts, generate_word_parts, ignore_keywords, preserve_original, protected_words, protected_words_path, split_on_case_change, split_on_numerics, stem_english_possessive, type_table, type_table_path } | { type, catenate_all, catenate_numbers, catenate_words, generate_number_parts, generate_word_parts, preserve_original, protected_words, protected_words_path, split_on_case_change, split_on_numerics, stem_english_possessive, type_table, type_table_path } | { type, minimum_length } | { type, use_romaji } | { type, stoptags } | { type, alternate, case_first, case_level, country, decomposition, hiragana_quaternary_mode, language, numeric, rules, strength, variable_top, variant } | { type, unicode_set_filter } | { type, name } | { type, dir, id } | { type, encoder, languageset, max_code_len, name_type, replace, rule_type } | { type }[])**: Array of token filters used to apply after the tokenizer. - * **`normalizer` (Optional, string)**: Normalizer to use to convert text into a single token. - * **`text` (Optional, string | string[])**: Text to analyze. If an array of strings is provided, it is analyzed as a multi-value field. - * **`tokenizer` (Optional, string | { type, tokenize_on_chars, max_token_length } | { type, max_token_length } | { type, custom_token_chars, max_gram, min_gram, token_chars } | { type, buffer_size } | { type } | { type } | { type, custom_token_chars, max_gram, min_gram, token_chars } | { type, buffer_size, delimiter, replacement, reverse, skip } | { type, flags, group, pattern } | { type, pattern } | { type, pattern } | { type, max_token_length } | { type } | { type, max_token_length } | { type, max_token_length } | { type, rule_files } | { type, discard_punctuation, mode, nbest_cost, nbest_examples, user_dictionary, user_dictionary_rules, discard_compound_token } | { type, decompound_mode, discard_punctuation, user_dictionary, user_dictionary_rules })**: Tokenizer to use to convert text into tokens. +```ts +client.indices.getDataStream({ ... 
}) +``` +### Arguments [_arguments_indices.get_data_stream] +#### Request (object) [_request_indices.get_data_stream] +- **`name` (Optional, string | string[])**: List of data stream names used to limit the request. +Wildcard (`*`) expressions are supported. If omitted, all data streams are returned. +- **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Type of data stream that wildcard patterns can match. +Supports a list of values, such as `open,hidden`. +- **`include_defaults` (Optional, boolean)**: If true, returns all relevant default configurations for the index template. +- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. +- **`verbose` (Optional, boolean)**: Whether the maximum timestamp for each data stream should be calculated and returned. -### cancel_migrate_reindex [_cancel_migrate_reindex] +## client.indices.getFieldMapping [_indices.get_field_mapping] +Get mapping definitions. +Retrieves mapping definitions for one or more fields. +For data streams, the API retrieves field mappings for the stream’s backing indices. -Cancel a migration reindex operation. +This API is useful if you don't need a complete mapping or if an index mapping contains a large number of fields. -Cancel a migration reindex attempt for a data stream or index. +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-indices-get-mapping) ```ts -client.indices.cancelMigrateReindex({ index }) +client.indices.getFieldMapping({ fields }) ``` +### Arguments [_arguments_indices.get_field_mapping] + +#### Request (object) [_request_indices.get_field_mapping] +- **`fields` (string | string[])**: List or wildcard expression of fields used to limit returned information. +Supports wildcards (`*`). +- **`index` (Optional, string | string[])**: List of data streams, indices, and aliases used to limit the request. +Supports wildcards (`*`). +To target all data streams and indices, omit this parameter or use `*` or `_all`. +- **`allow_no_indices` (Optional, boolean)**: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. +This behavior applies even if the request targets other open indices. +- **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Type of index that wildcard patterns can match. +If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. +Supports a list of values, such as `open,hidden`. +Valid values are: `all`, `open`, `closed`, `hidden`, `none`. +- **`ignore_unavailable` (Optional, boolean)**: If `false`, the request returns an error if it targets a missing or closed index. +- **`include_defaults` (Optional, boolean)**: If `true`, return all default settings in the response. +- **`local` (Optional, boolean)**: If `true`, the request retrieves information from the local node only. + +## client.indices.getIndexTemplate [_indices.get_index_template] +Get index templates. +Get information about one or more index templates. 
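+A minimal sketch, assuming a wildcard name pattern of your own:
+
+```ts
+// fetch all composable index templates whose names start with "logs-"
+const response = await client.indices.getIndexTemplate({ name: 'logs-*' })
+// the body mirrors the REST response, i.e. an index_templates array
+console.log(response.index_templates.map(t => t.name))
+```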
+ +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-indices-get-index-template) -### Arguments [_arguments_164] +```ts +client.indices.getIndexTemplate({ ... }) +``` -* **Request (object):** +### Arguments [_arguments_indices.get_index_template] - * **`index` (string | string[])**: The index or data stream name +#### Request (object) [_request_indices.get_index_template] +- **`name` (Optional, string)**: List of index template names used to limit the request. Wildcard (*) expressions are supported. +- **`local` (Optional, boolean)**: If true, the request retrieves information from the local node only. Defaults to false, which means information is retrieved from the master node. +- **`flat_settings` (Optional, boolean)**: If true, returns settings in flat format. +- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. +- **`include_defaults` (Optional, boolean)**: If true, returns all relevant default configurations for the index template. +## client.indices.getMapping [_indices.get_mapping] +Get mapping definitions. +For data streams, the API retrieves mappings for the stream’s backing indices. +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-indices-get-mapping) -### clear_cache [_clear_cache] +```ts +client.indices.getMapping({ ... }) +``` -Clear the cache. Clear the cache of one or more indices. For data streams, the API clears the caches of the stream’s backing indices. +### Arguments [_arguments_indices.get_mapping] + +#### Request (object) [_request_indices.get_mapping] +- **`index` (Optional, string | string[])**: List of data streams, indices, and aliases used to limit the request. +Supports wildcards (`*`). +To target all data streams and indices, omit this parameter or use `*` or `_all`. +- **`allow_no_indices` (Optional, boolean)**: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. +This behavior applies even if the request targets other open indices. +- **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Type of index that wildcard patterns can match. +If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. +Supports a list of values, such as `open,hidden`. +Valid values are: `all`, `open`, `closed`, `hidden`, `none`. +- **`ignore_unavailable` (Optional, boolean)**: If `false`, the request returns an error if it targets a missing or closed index. +- **`local` (Optional, boolean)**: If `true`, the request retrieves information from the local node only. +- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. + +## client.indices.getMigrateReindexStatus [_indices.get_migrate_reindex_status] +Get the migration reindexing status. -By default, the clear cache API clears all caches. To clear only specific caches, use the `fielddata`, `query`, or `request` parameters. To clear the cache only of specific fields, use the `fields` parameter. +Get the status of a migration reindex attempt for a data stream or index. 
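+A bare-bones status check might look like the following sketch (the data stream name is illustrative):
+
+```ts
+// poll the status of a previously started migration reindex
+const status = await client.indices.getMigrateReindexStatus({ index: 'my-data-stream' })
+console.log(status)
+```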
-[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-clear-cache) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/group/endpoint-migration) ```ts -client.indices.clearCache({ ... }) +client.indices.getMigrateReindexStatus({ index }) ``` +### Arguments [_arguments_indices.get_migrate_reindex_status] -### Arguments [_arguments_165] +#### Request (object) [_request_indices.get_migrate_reindex_status] +- **`index` (string | string[])**: The index or data stream name. -* **Request (object):** +## client.indices.getSettings [_indices.get_settings] +Get index settings. +Get setting information for one or more indices. +For data streams, it returns setting information for the stream's backing indices. - * **`index` (Optional, string | string[])**: List of data streams, indices, and aliases used to limit the request. Supports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`. - * **`allow_no_indices` (Optional, boolean)**: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. - * **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports a list of values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. - * **`fielddata` (Optional, boolean)**: If `true`, clears the fields cache. Use the `fields` parameter to clear the cache of specific fields only. - * **`fields` (Optional, string | string[])**: List of field names used to limit the `fielddata` parameter. - * **`ignore_unavailable` (Optional, boolean)**: If `false`, the request returns an error if it targets a missing or closed index. - * **`query` (Optional, boolean)**: If `true`, clears the query cache. - * **`request` (Optional, boolean)**: If `true`, clears the request cache. +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-indices-get-settings) +```ts +client.indices.getSettings({ ... }) +``` +### Arguments [_arguments_indices.get_settings] -### clone [_clone] +#### Request (object) [_request_indices.get_settings] +- **`index` (Optional, string | string[])**: List of data streams, indices, and aliases used to limit +the request. Supports wildcards (`*`). To target all data streams and +indices, omit this parameter or use `*` or `_all`. +- **`name` (Optional, string | string[])**: List or wildcard expression of settings to retrieve. +- **`allow_no_indices` (Optional, boolean)**: If `false`, the request returns an error if any wildcard expression, index +alias, or `_all` value targets only missing or closed indices. This +behavior applies even if the request targets other open indices. For +example, a request targeting `foo*,bar*` returns an error if an index +starts with foo but no index starts with `bar`. +- **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Type of index that wildcard patterns can match. +If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. 
+Supports a list of values, such as `open,hidden`. +- **`flat_settings` (Optional, boolean)**: If `true`, returns settings in flat format. +- **`ignore_unavailable` (Optional, boolean)**: If `false`, the request returns an error if it targets a missing or closed index. +- **`include_defaults` (Optional, boolean)**: If `true`, return all default settings in the response. +- **`local` (Optional, boolean)**: If `true`, the request retrieves information from the local node only. If +`false`, information is retrieved from the master node. +- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is +received before the timeout expires, the request fails and returns an +error. -Clone an index. Clone an existing index into a new index. Each original primary shard is cloned into a new primary shard in the new index. +## client.indices.getTemplate [_indices.get_template] +Get index templates. +Get information about one or more index templates. -::::{important} -Elasticsearch does not apply index templates to the resulting index. The API also does not copy index metadata from the original index. Index metadata includes aliases, index lifecycle management phase definitions, and cross-cluster replication (CCR) follower information. For example, if you clone a CCR follower index, the resulting clone will not be a follower index. -:::: +IMPORTANT: This documentation is about legacy index templates, which are deprecated and will be replaced by the composable templates introduced in Elasticsearch 7.8. +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-indices-get-template) -The clone API copies most index settings from the source index to the resulting index, with the exception of `index.number_of_replicas` and `index.auto_expand_replicas`. To set the number of replicas in the resulting index, configure these settings in the clone request. +```ts +client.indices.getTemplate({ ... }) +``` -Cloning works as follows: +### Arguments [_arguments_indices.get_template] -* First, it creates a new target index with the same definition as the source index. -* Then it hard-links segments from the source index into the target index. If the file system does not support hard-linking, all segments are copied into the new index, which is a much more time consuming process. -* Finally, it recovers the target index as though it were a closed index which had just been re-opened. +#### Request (object) [_request_indices.get_template] +- **`name` (Optional, string | string[])**: List of index template names used to limit the request. +Wildcard (`*`) expressions are supported. +To return all index templates, omit this parameter or use a value of `_all` or `*`. +- **`flat_settings` (Optional, boolean)**: If `true`, returns settings in flat format. +- **`local` (Optional, boolean)**: If `true`, the request retrieves information from the local node only. +- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. -::::{important} -Indices can only be cloned if they meet the following requirements: -:::: +## client.indices.migrateReindex [_indices.migrate_reindex] +Reindex legacy backing indices. +Reindex all legacy backing indices for a data stream. +This operation occurs in a persistent task. 
+The persistent task ID is returned immediately and the reindexing work is completed in that task. -* The index must be marked as read-only and have a cluster health status of green. -* The target index must not exist. -* The source index must have the same number of primary shards as the target index. -* The node handling the clone process must have sufficient free disk space to accommodate a second copy of the existing index. +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-indices-migrate-reindex) -The current write index on a data stream cannot be cloned. In order to clone the current write index, the data stream must first be rolled over so that a new write index is created and then the previous write index can be cloned. +```ts +client.indices.migrateReindex({ ... }) +``` -::::{note} -Mappings cannot be specified in the `_clone` request. The mappings of the source index will be used for the target index. -:::: +### Arguments [_arguments_indices.migrate_reindex] +#### Request (object) [_request_indices.migrate_reindex] +- **`reindex` (Optional, { mode, source })** -**Monitor the cloning process** +## client.indices.migrateToDataStream [_indices.migrate_to_data_stream] +Convert an index alias to a data stream. +Converts an index alias to a data stream. +You must have a matching index template that is data stream enabled. +The alias must meet the following criteria: +The alias must have a write index; +All indices for the alias must have a `@timestamp` field mapping of a `date` or `date_nanos` field type; +The alias must not have any filters; +The alias must not use custom routing. +If successful, the request removes the alias and creates a data stream with the same name. +The indices for the alias become hidden backing indices for the stream. +The write index for the alias becomes the write index for the stream. -The cloning process can be monitored with the cat recovery API or the cluster health API can be used to wait until all primary shards have been allocated by setting the `wait_for_status` parameter to `yellow`. +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-indices-migrate-to-data-stream) -The `_clone` API returns as soon as the target index has been added to the cluster state, before any shards have been allocated. At this point, all shards are in the state unassigned. If, for any reason, the target index can’t be allocated, its primary shard will remain unassigned until it can be allocated on that node. +```ts +client.indices.migrateToDataStream({ name }) +``` -Once the primary shard is allocated, it moves to state initializing, and the clone process begins. When the clone operation completes, the shard will become active. At that point, Elasticsearch will try to allocate any replicas and may decide to relocate the primary shard to another node. +### Arguments [_arguments_indices.migrate_to_data_stream] -**Wait for active shards** +#### Request (object) [_request_indices.migrate_to_data_stream] +- **`name` (string)**: Name of the index alias to convert to a data stream. +- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. +- **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. 
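+As a hedged sketch, converting an alias that already meets the criteria above could be as small as this (the alias name is illustrative):
+
+```ts
+// the alias must have a write index and a @timestamp mapping on all of its indices
+await client.indices.migrateToDataStream({ name: 'my-logs-alias' })
+```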
-Because the clone operation creates a new index to clone the shards to, the wait for active shards setting on index creation applies to the clone index action as well. +## client.indices.modifyDataStream [_indices.modify_data_stream] +Update data streams. +Performs one or more data stream modification actions in a single atomic operation. -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-clone) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-indices-modify-data-stream) ```ts -client.indices.clone({ index, target }) +client.indices.modifyDataStream({ actions }) ``` +### Arguments [_arguments_indices.modify_data_stream] -### Arguments [_arguments_166] - -* **Request (object):** +#### Request (object) [_request_indices.modify_data_stream] +- **`actions` ({ add_backing_index, remove_backing_index }[])**: Actions to perform. - * **`index` (string)**: Name of the source index to clone. - * **`target` (string)**: Name of the target index to create. - * **`aliases` (Optional, Record)**: Aliases for the resulting index. - * **`settings` (Optional, Record)**: Configuration options for the target index. - * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. - * **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. - * **`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))**: The number of shard copies that must be active before proceeding with the operation. Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). +## client.indices.open [_indices.open] +Open a closed index. +For data streams, the API opens any closed backing indices. +A closed index is blocked for read/write operations and does not allow all operations that opened indices allow. +It is not possible to index documents or to search for documents in a closed index. +This allows closed indices to not have to maintain internal data structures for indexing or searching documents, resulting in a smaller overhead on the cluster. +When opening or closing an index, the master is responsible for restarting the index shards to reflect the new state of the index. +The shards will then go through the normal recovery process. +The data of opened or closed indices is automatically replicated by the cluster to ensure that enough shard copies are safely kept around at all times. -### close [_close] +You can open and close multiple indices. +An error is thrown if the request explicitly refers to a missing index. +This behavior can be turned off by using the `ignore_unavailable=true` parameter. -Close an index. A closed index is blocked for read or write operations and does not allow all operations that opened indices allow. It is not possible to index documents or to search for documents in a closed index. Closed indices do not have to maintain internal data structures for indexing or searching documents, which results in a smaller overhead on the cluster. +By default, you must explicitly name the indices you are opening or closing. +To open or close indices with `_all`, `*`, or other wildcard expressions, change the `action.destructive_requires_name` setting to `false`. 
+This setting can also be changed with the cluster update settings API. -When opening or closing an index, the master node is responsible for restarting the index shards to reflect the new state of the index. The shards will then go through the normal recovery process. The data of opened and closed indices is automatically replicated by the cluster to ensure that enough shard copies are safely kept around at all times. +Closed indices consume a significant amount of disk-space which can cause problems in managed environments. +Closing indices can be turned off with the cluster settings API by setting `cluster.indices.close.enable` to `false`. -You can open and close multiple indices. An error is thrown if the request explicitly refers to a missing index. This behaviour can be turned off using the `ignore_unavailable=true` parameter. +Because opening or closing an index allocates its shards, the `wait_for_active_shards` setting on index creation applies to the `_open` and `_close` index actions as well. -By default, you must explicitly name the indices you are opening or closing. To open or close indices with `_all`, `*`, or other wildcard expressions, change the` action.destructive_requires_name` setting to `false`. This setting can also be changed with the cluster update settings API. +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-indices-open) -Closed indices consume a significant amount of disk-space which can cause problems in managed environments. Closing indices can be turned off with the cluster settings API by setting `cluster.indices.close.enable` to `false`. +```ts +client.indices.open({ index }) +``` -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-close) +### Arguments [_arguments_indices.open] + +#### Request (object) [_request_indices.open] +- **`index` (string | string[])**: List of data streams, indices, and aliases used to limit the request. +Supports wildcards (`*`). +By default, you must explicitly name the indices you are using to limit the request. +To limit a request using `_all`, `*`, or other wildcard expressions, change the `action.destructive_requires_name` setting to `false`. +You can update this setting in the `elasticsearch.yml` file or using the cluster update settings API. +- **`allow_no_indices` (Optional, boolean)**: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. +This behavior applies even if the request targets other open indices. +- **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Type of index that wildcard patterns can match. +If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. +Supports a list of values, such as `open,hidden`. +Valid values are: `all`, `open`, `closed`, `hidden`, `none`. +- **`ignore_unavailable` (Optional, boolean)**: If `false`, the request returns an error if it targets a missing or closed index. +- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. +- **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. +If no response is received before the timeout expires, the request fails and returns an error. 
+- **`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))**: The number of shard copies that must be active before proceeding with the operation. +Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). + +## client.indices.promoteDataStream [_indices.promote_data_stream] +Promote a data stream. +Promote a data stream from a replicated data stream managed by cross-cluster replication (CCR) to a regular data stream. + +With CCR auto following, a data stream from a remote cluster can be replicated to the local cluster. +These data streams can't be rolled over in the local cluster. +These replicated data streams roll over only if the upstream data stream rolls over. +In the event that the remote cluster is no longer available, the data stream in the local cluster can be promoted to a regular data stream, which allows these data streams to be rolled over in the local cluster. + +NOTE: When promoting a data stream, ensure the local cluster has a data stream enabled index template that matches the data stream. +If this is missing, the data stream will not be able to roll over until a matching index template is created. +This will affect the lifecycle management of the data stream and interfere with the data stream size and retention. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-indices-promote-data-stream) ```ts -client.indices.close({ index }) +client.indices.promoteDataStream({ name }) ``` +### Arguments [_arguments_indices.promote_data_stream] -### Arguments [_arguments_167] +#### Request (object) [_request_indices.promote_data_stream] +- **`name` (string)**: The name of the data stream +- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. -* **Request (object):** +## client.indices.putAlias [_indices.put_alias] +Create or update an alias. +Adds a data stream or index to an alias. - * **`index` (string | string[])**: List or wildcard expression of index names used to limit the request. - * **`allow_no_indices` (Optional, boolean)**: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. - * **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports a list of values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. - * **`ignore_unavailable` (Optional, boolean)**: If `false`, the request returns an error if it targets a missing or closed index. - * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. - * **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. - * **`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))**: The number of shard copies that must be active before proceeding with the operation. 
Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-indices-put-alias) +```ts +client.indices.putAlias({ index, name }) +``` +### Arguments [_arguments_indices.put_alias] + +#### Request (object) [_request_indices.put_alias] +- **`index` (string | string[])**: List of data streams or indices to add. +Supports wildcards (`*`). +Wildcard patterns that match both data streams and indices return an error. +- **`name` (string)**: Alias to update. +If the alias doesn’t exist, the request creates it. +Index alias names support date math. +- **`filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })**: Query used to limit documents the alias can access. +- **`index_routing` (Optional, string)**: Value used to route indexing operations to a specific shard. +If specified, this overwrites the `routing` value for indexing operations. +Data stream aliases don’t support this parameter. +- **`is_write_index` (Optional, boolean)**: If `true`, sets the write index or data stream for the alias. +If an alias points to multiple indices or data streams and `is_write_index` isn’t set, the alias rejects write requests. +If an index alias points to one index and `is_write_index` isn’t set, the index automatically acts as the write index. +Data stream aliases don’t automatically set a write data stream, even if the alias points to one data stream. +- **`routing` (Optional, string)**: Value used to route indexing and search operations to a specific shard. +Data stream aliases don’t support this parameter. +- **`search_routing` (Optional, string)**: Value used to route search operations to a specific shard. +If specified, this overwrites the `routing` value for search operations. +Data stream aliases don’t support this parameter. +- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. +- **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. +If no response is received before the timeout expires, the request fails and returns an error. + +## client.indices.putDataLifecycle [_indices.put_data_lifecycle] +Update data stream lifecycles. +Update the data stream lifecycle of the specified data streams. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-indices-put-data-lifecycle) -### create [_create_2] +```ts +client.indices.putDataLifecycle({ name }) +``` -Create an index. You can use the create index API to add a new index to an Elasticsearch cluster. 
When creating an index, you can specify the following: +### Arguments [_arguments_indices.put_data_lifecycle] + +#### Request (object) [_request_indices.put_data_lifecycle] +- **`name` (string | string[])**: List of data streams used to limit the request. +Supports wildcards (`*`). +To target all data streams use `*` or `_all`. +- **`data_retention` (Optional, string | -1 | 0)**: If defined, every document added to this data stream will be stored at least for this time frame. +Any time after this duration the document could be deleted. +When empty, every document in this data stream will be stored indefinitely. +- **`downsampling` (Optional, { rounds })**: The downsampling configuration to execute for the managed backing index after rollover. +- **`enabled` (Optional, boolean)**: If defined, it turns data stream lifecycle on/off (`true`/`false`) for this data stream. A data stream lifecycle +that's disabled (enabled: `false`) will have no effect on the data stream. +- **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Type of data stream that wildcard patterns can match. +Supports a list of values, such as `open,hidden`. +Valid values are: `all`, `hidden`, `open`, `closed`, `none`. +- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is +received before the timeout expires, the request fails and returns an +error. +- **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. +If no response is received before the timeout expires, the request fails and returns an error. + +## client.indices.putIndexTemplate [_indices.put_index_template] +Create or update an index template. +Index templates define settings, mappings, and aliases that can be applied automatically to new indices. + +Elasticsearch applies templates to new indices based on an wildcard pattern that matches the index name. +Index templates are applied during data stream or index creation. +For data streams, these settings and mappings are applied when the stream's backing indices are created. +Settings and mappings specified in a create index API request override any settings or mappings specified in an index template. +Changes to index templates do not affect existing indices, including the existing backing indices of a data stream. + +You can use C-style `/* *\/` block comments in index templates. +You can include comments anywhere in the request body, except before the opening curly bracket. -* Settings for the index. -* Mappings for fields in the index. -* Index aliases +**Multiple matching templates** -**Wait for active shards** +If multiple index templates match the name of a new index or data stream, the template with the highest priority is used. -By default, index creation will only return a response to the client when the primary copies of each shard have been started, or the request times out. The index creation response will indicate what happened. For example, `acknowledged` indicates whether the index was successfully created in the cluster, `while shards_acknowledged` indicates whether the requisite number of shard copies were started for each shard in the index before timing out. Note that it is still possible for either `acknowledged` or `shards_acknowledged` to be `false`, but for the index creation to be successful. These values simply indicate whether the operation completed before the timeout. 
If `acknowledged` is false, the request timed out before the cluster state was updated with the newly created index, but it probably will be created sometime soon. If `shards_acknowledged` is false, then the request timed out before the requisite number of shards were started (by default just the primaries), even if the cluster state was successfully updated to reflect the newly created index (that is to say, `acknowledged` is `true`). +Multiple templates with overlapping index patterns at the same priority are not allowed and an error will be thrown when attempting to create a template matching an existing index template at identical priorities. + +**Composing aliases, mappings, and settings** -You can change the default of only waiting for the primary shards to start through the index setting `index.write.wait_for_active_shards`. Note that changing this setting will also affect the `wait_for_active_shards` value on all subsequent write operations. +When multiple component templates are specified in the `composed_of` field for an index template, they are merged in the order specified, meaning that later component templates override earlier component templates. +Any mappings, settings, or aliases from the parent index template are merged in next. +Finally, any configuration on the index request itself is merged. +Mapping definitions are merged recursively, which means that later mapping components can introduce new field mappings and update the mapping configuration. +If a field mapping is already contained in an earlier component, its definition will be completely overwritten by the later one. +This recursive merging strategy applies not only to field mappings, but also root options like `dynamic_templates` and `meta`. +If an earlier component contains a `dynamic_templates` block, then by default new `dynamic_templates` entries are appended onto the end. +If an entry already exists with the same key, then it is overwritten by the new definition. -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-create) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-indices-put-index-template) ```ts -client.indices.create({ index }) +client.indices.putIndexTemplate({ name }) ``` +### Arguments [_arguments_indices.put_index_template] + +#### Request (object) [_request_indices.put_index_template] +- **`name` (string)**: Index or template name +- **`index_patterns` (Optional, string | string[])**: Name of the index template to create. +- **`composed_of` (Optional, string[])**: An ordered list of component template names. +Component templates are merged in the order specified, meaning that the last component template specified has the highest precedence. +- **`template` (Optional, { aliases, mappings, settings, lifecycle })**: Template to be applied. +It may optionally include an `aliases`, `mappings`, or `settings` configuration. +- **`data_stream` (Optional, { hidden, allow_custom_routing })**: If this object is included, the template is used to create data streams and their backing indices. +Supports an empty object. +Data streams require a matching index template with a `data_stream` object. +- **`priority` (Optional, number)**: Priority to determine index template precedence when a new data stream or index is created. +The index template with the highest priority is chosen. +If no priority is specified the template is treated as though it is of priority 0 (lowest priority). 
+This number is not automatically generated by Elasticsearch. +- **`version` (Optional, number)**: Version number used to manage index templates externally. +This number is not automatically generated by Elasticsearch. +External systems can use these version numbers to simplify template management. +To unset a version, replace the template without specifying one. +- **`_meta` (Optional, Record)**: Optional user metadata about the index template. +It may have any contents. +It is not automatically generated or used by Elasticsearch. +This user-defined object is stored in the cluster state, so keeping it short is preferable +To unset the metadata, replace the template without specifying it. +- **`allow_auto_create` (Optional, boolean)**: This setting overrides the value of the `action.auto_create_index` cluster setting. +If set to `true` in a template, then indices can be automatically created using that template even if auto-creation of indices is disabled via `actions.auto_create_index`. +If set to `false`, then indices or data streams matching the template must always be explicitly created, and may never be automatically created. +- **`ignore_missing_component_templates` (Optional, string[])**: The configuration option ignore_missing_component_templates can be used when an index template +references a component template that might not exist +- **`deprecated` (Optional, boolean)**: Marks this index template as deprecated. When creating or updating a non-deprecated index template +that uses deprecated components, Elasticsearch will emit a deprecation warning. +- **`create` (Optional, boolean)**: If `true`, this request cannot replace or update existing index templates. +- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. +- **`cause` (Optional, string)**: User defined reason for creating/updating the index template + +## client.indices.putMapping [_indices.put_mapping] +Update field mappings. +Add new fields to an existing data stream or index. +You can also use this API to change the search settings of existing fields and add new properties to existing object fields. +For data streams, these changes are applied to all backing indices by default. -### Arguments [_arguments_168] +**Add multi-fields to an existing field** -* **Request (object):** +Multi-fields let you index the same field in different ways. +You can use this API to update the fields mapping parameter and enable multi-fields for an existing field. +WARNING: If an index (or data stream) contains documents when you add a multi-field, those documents will not have values for the new multi-field. +You can populate the new multi-field with the update by query API. - * **`index` (string)**: Name of the index you wish to create. - * **`aliases` (Optional, Record)**: Aliases for the index. - * **`mappings` (Optional, { all_field, date_detection, dynamic, dynamic_date_formats, dynamic_templates, _field_names, index_field, _meta, numeric_detection, properties, _routing, _size, _source, runtime, enabled, subobjects, _data_stream_timestamp })**: Mapping for fields in the index. If specified, this mapping can include: +**Change supported mapping parameters for an existing field** - * Field names - * Field data types - * Mapping parameters +The documentation for each mapping parameter indicates whether you can update it for an existing field using this API. 
+For example, you can use the update mapping API to update the `ignore_above` parameter. - * **`settings` (Optional, { index, mode, routing_path, soft_deletes, sort, number_of_shards, number_of_replicas, number_of_routing_shards, check_on_startup, codec, routing_partition_size, load_fixed_bitset_filters_eagerly, hidden, auto_expand_replicas, merge, search, refresh_interval, max_result_window, max_inner_result_window, max_rescore_window, max_docvalue_fields_search, max_script_fields, max_ngram_diff, max_shingle_diff, blocks, max_refresh_listeners, analyze, highlight, max_terms_count, max_regex_length, routing, gc_deletes, default_pipeline, final_pipeline, lifecycle, provided_name, creation_date, creation_date_string, uuid, version, verified_before_close, format, max_slices_per_scroll, translog, query_string, priority, top_metrics_max_size, analysis, settings, time_series, queries, similarity, mapping, indexing.slowlog, indexing_pressure, store })**: Configuration options for the index. - * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. - * **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. - * **`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))**: The number of shard copies that must be active before proceeding with the operation. Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). +**Change the mapping of an existing field** +Except for supported mapping parameters, you can't change the mapping or field type of an existing field. +Changing an existing field could invalidate data that's already indexed. +If you need to change the mapping of a field in a data stream's backing indices, refer to documentation about modifying data streams. +If you need to change the mapping of a field in other indices, create a new index with the correct mapping and reindex your data into that index. -### create_data_stream [_create_data_stream] +**Rename a field** -Create a data stream. Creates a data stream. You must have a matching index template with data stream enabled. +Renaming a field would invalidate data already indexed under the old field name. +Instead, add an alias field to create an alternate field name. -[Endpoint documentation](docs-content://manage-data/data-store/data-streams.md) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-indices-put-mapping) ```ts -client.indices.createDataStream({ name }) +client.indices.putMapping({ index }) ``` +### Arguments [_arguments_indices.put_mapping] + +#### Request (object) [_request_indices.put_mapping] +- **`index` (string | string[])**: A list of index names the mapping should be added to (supports wildcards); use `_all` or omit to add the mapping on all indices. +- **`date_detection` (Optional, boolean)**: Controls whether dynamic date detection is enabled. +- **`dynamic` (Optional, Enum("strict" | "runtime" | true | false))**: Controls whether new fields are added dynamically. +- **`dynamic_date_formats` (Optional, string[])**: If date detection is enabled then new string fields are checked +against 'dynamic_date_formats' and if the value matches then +a new date field is added instead of string. 
+- **`dynamic_templates` (Optional, Record[])**: Specify dynamic templates for the mapping. +- **`_field_names` (Optional, { enabled })**: Control whether field names are enabled for the index. +- **`_meta` (Optional, Record)**: A mapping type can have custom meta data associated with it. These are +not used at all by Elasticsearch, but can be used to store +application-specific metadata. +- **`numeric_detection` (Optional, boolean)**: Automatically map strings into numeric data types for all fields. +- **`properties` (Optional, Record)**: Mapping for a field. For new fields, this mapping can include: + +- Field name +- Field data type +- Mapping parameters +- **`_routing` (Optional, { required })**: Enable making a routing value required on indexed documents. +- **`_source` (Optional, { compress, compress_threshold, enabled, excludes, includes, mode })**: Control whether the _source field is enabled on the index. +- **`runtime` (Optional, Record)**: Mapping of runtime fields for the index. +- **`allow_no_indices` (Optional, boolean)**: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. +This behavior applies even if the request targets other open indices. +- **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Type of index that wildcard patterns can match. +If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. +Supports a list of values, such as `open,hidden`. +Valid values are: `all`, `open`, `closed`, `hidden`, `none`. +- **`ignore_unavailable` (Optional, boolean)**: If `false`, the request returns an error if it targets a missing or closed index. +- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. +- **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. +If no response is received before the timeout expires, the request fails and returns an error. +- **`write_index_only` (Optional, boolean)**: If `true`, the mappings are applied only to the current write index for the target. + +## client.indices.putSettings [_indices.put_settings] +Update index settings. +Changes dynamic index settings in real time. +For data streams, index setting changes are applied to all backing indices by default. + +To revert a setting to the default value, use a null value. +The list of per-index settings that can be updated dynamically on live indices can be found in index module documentation. +To preserve existing settings from being updated, set the `preserve_existing` parameter to `true`. + +NOTE: You can only define new analyzers on closed indices. +To add an analyzer, you must close the index, define the analyzer, and reopen the index. +You cannot close the write index of a data stream. +To update the analyzer for a data stream's write index and future backing indices, update the analyzer in the index template used by the stream. +Then roll over the data stream to apply the new analyzer to the stream's write index and future backing indices. +This affects searches and any new data added to the stream after the rollover. +However, it does not affect the data stream's backing indices or their existing data. 
+To change the analyzer for existing backing indices, you must create a new data stream and reindex your data into it.

+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-indices-put-settings)

-### Arguments [_arguments_169]
-
-* **Request (object):**
-
- * **`name` (string)**: Name of the data stream, which must meet the following criteria: Lowercase only; Cannot include `\`, `/`, `*`, `?`, `"`, `<`, `>`, `|`, `,`, `#`, `:`, or a space character; Cannot start with `-`, `_`, `+`, or `.ds-`; Cannot be `.` or `..`; Cannot be longer than 255 bytes. Multi-byte characters count towards this limit faster.
- * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.
- * **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.
-

+```ts
+client.indices.putSettings({ ... })
+```

+### Arguments [_arguments_indices.put_settings]
+
+#### Request (object) [_request_indices.put_settings]
+- **`index` (Optional, string | string[])**: List of data streams, indices, and aliases used to limit
+the request. Supports wildcards (`*`). To target all data streams and
+indices, omit this parameter or use `*` or `_all`.
+- **`settings` (Optional, { index, mode, routing_path, soft_deletes, sort, number_of_shards, number_of_replicas, number_of_routing_shards, check_on_startup, codec, routing_partition_size, load_fixed_bitset_filters_eagerly, hidden, auto_expand_replicas, merge, search, refresh_interval, max_result_window, max_inner_result_window, max_rescore_window, max_docvalue_fields_search, max_script_fields, max_ngram_diff, max_shingle_diff, blocks, max_refresh_listeners, analyze, highlight, max_terms_count, max_regex_length, routing, gc_deletes, default_pipeline, final_pipeline, lifecycle, provided_name, creation_date, creation_date_string, uuid, version, verified_before_close, format, max_slices_per_scroll, translog, query_string, priority, top_metrics_max_size, analysis, settings, time_series, queries, similarity, mapping, indexing.slowlog, indexing_pressure, store })**
+- **`allow_no_indices` (Optional, boolean)**: If `false`, the request returns an error if any wildcard expression, index
+alias, or `_all` value targets only missing or closed indices. This
+behavior applies even if the request targets other open indices. For
+example, a request targeting `foo*,bar*` returns an error if an index
+starts with `foo` but no index starts with `bar`.
+- **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Type of index that wildcard patterns can match. If the request can target
+data streams, this argument determines whether wildcard expressions match
+hidden data streams. Supports a list of values, such as
+`open,hidden`.
+- **`flat_settings` (Optional, boolean)**: If `true`, returns settings in flat format.
+- **`ignore_unavailable` (Optional, boolean)**: If `false`, the request returns an error if it targets a missing or closed index.
+- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is
+received before the timeout expires, the request fails and returns an
+error.
+- **`preserve_existing` (Optional, boolean)**: If `true`, existing index settings remain unchanged.
+- **`reopen` (Optional, boolean)**: Whether to close and reopen the index to apply non-dynamic settings. +If set to `true` the indices to which the settings are being applied +will be closed temporarily and then reopened in order to apply the changes. +- **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the + timeout expires, the request fails and returns an error. + +## client.indices.putTemplate [_indices.put_template] +Create or update an index template. +Index templates define settings, mappings, and aliases that can be applied automatically to new indices. +Elasticsearch applies templates to new indices based on an index pattern that matches the index name. + +IMPORTANT: This documentation is about legacy index templates, which are deprecated and will be replaced by the composable templates introduced in Elasticsearch 7.8. + +Composable templates always take precedence over legacy templates. +If no composable template matches a new index, matching legacy templates are applied according to their order. + +Index templates are only applied during index creation. +Changes to index templates do not affect existing indices. +Settings and mappings specified in create index API requests override any settings or mappings specified in an index template. + +You can use C-style `/* *\/` block comments in index templates. +You can include comments anywhere in the request body, except before the opening curly bracket. -### create_from [_create_from] +**Indices matching multiple templates** -Create an index from a source index. +Multiple index templates can potentially match an index, in this case, both the settings and mappings are merged into the final configuration of the index. +The order of the merging can be controlled using the order parameter, with lower order being applied first, and higher orders overriding them. +NOTE: Multiple matching templates with the same order value will result in a non-deterministic merging order. -Copy the mappings and settings from the source index to a destination index while allowing request settings and mappings to override the source values. +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-indices-put-template) ```ts -client.indices.createFrom({ source, dest }) +client.indices.putTemplate({ name }) ``` +### Arguments [_arguments_indices.put_template] + +#### Request (object) [_request_indices.put_template] +- **`name` (string)**: The name of the template +- **`aliases` (Optional, Record)**: Aliases for the index. +- **`index_patterns` (Optional, string | string[])**: Array of wildcard expressions used to match the names +of indices during creation. +- **`mappings` (Optional, { all_field, date_detection, dynamic, dynamic_date_formats, dynamic_templates, _field_names, index_field, _meta, numeric_detection, properties, _routing, _size, _source, runtime, enabled, subobjects, _data_stream_timestamp })**: Mapping for fields in the index. +- **`order` (Optional, number)**: Order in which Elasticsearch applies this template if index +matches multiple templates. + +Templates with lower 'order' values are merged first. Templates with higher +'order' values are merged later, overriding templates with lower values. 
+- **`settings` (Optional, { index, mode, routing_path, soft_deletes, sort, number_of_shards, number_of_replicas, number_of_routing_shards, check_on_startup, codec, routing_partition_size, load_fixed_bitset_filters_eagerly, hidden, auto_expand_replicas, merge, search, refresh_interval, max_result_window, max_inner_result_window, max_rescore_window, max_docvalue_fields_search, max_script_fields, max_ngram_diff, max_shingle_diff, blocks, max_refresh_listeners, analyze, highlight, max_terms_count, max_regex_length, routing, gc_deletes, default_pipeline, final_pipeline, lifecycle, provided_name, creation_date, creation_date_string, uuid, version, verified_before_close, format, max_slices_per_scroll, translog, query_string, priority, top_metrics_max_size, analysis, settings, time_series, queries, similarity, mapping, indexing.slowlog, indexing_pressure, store })**: Configuration options for the index. +- **`version` (Optional, number)**: Version number used to manage index templates externally. This number +is not automatically generated by Elasticsearch. +To unset a version, replace the template without specifying one. +- **`create` (Optional, boolean)**: If true, this request cannot replace or update existing index templates. +- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is +received before the timeout expires, the request fails and returns an error. +- **`cause` (Optional, string)**: User defined reason for creating/updating the index template + +## client.indices.recovery [_indices.recovery] +Get index recovery information. +Get information about ongoing and completed shard recoveries for one or more indices. +For data streams, the API returns information for the stream's backing indices. -### Arguments [_arguments_170] - -* **Request (object):** +All recoveries, whether ongoing or complete, are kept in the cluster state and may be reported on at any time. - * **`source` (string)**: The source index or data stream name - * **`dest` (string)**: The destination index or data stream name - * **`create_from` (Optional, { mappings_override, settings_override, remove_index_blocks })** +Shard recovery is the process of initializing a shard copy, such as restoring a primary shard from a snapshot or creating a replica shard from a primary shard. +When a shard recovery completes, the recovered shard is available for search and indexing. +Recovery automatically occurs during the following processes: +* When creating an index for the first time. +* When a node rejoins the cluster and starts up any missing primary shard copies using the data that it holds in its data path. +* Creation of new replica shard copies from the primary. +* Relocation of a shard copy to a different node in the same cluster. +* A snapshot restore operation. +* A clone, shrink, or split operation. -### data_streams_stats [_data_streams_stats] +You can determine the cause of a shard recovery using the recovery or cat recovery APIs. -Get data stream stats. Retrieves statistics for one or more data streams. +The index recovery API reports information about completed recoveries only for shard copies that currently exist in the cluster. +It only reports the last recovery for each shard copy and does not report historical information about earlier recoveries, nor does it report information about the recoveries of shard copies that no longer exist. 
+This means that if a shard copy completes a recovery and then Elasticsearch relocates it onto a different node then the information about the original recovery will not be shown in the recovery API. -[Endpoint documentation](docs-content://manage-data/data-store/data-streams.md) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-indices-recovery) ```ts -client.indices.dataStreamsStats({ ... }) +client.indices.recovery({ ... }) ``` +### Arguments [_arguments_indices.recovery] -### Arguments [_arguments_171] - -* **Request (object):** - - * **`name` (Optional, string)**: List of data streams used to limit the request. Wildcard expressions (`*`) are supported. To target all data streams in a cluster, omit this parameter or use `*`. - * **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Type of data stream that wildcard patterns can match. Supports a list of values, such as `open,hidden`. +#### Request (object) [_request_indices.recovery] +- **`index` (Optional, string | string[])**: List of data streams, indices, and aliases used to limit the request. +Supports wildcards (`*`). +To target all data streams and indices, omit this parameter or use `*` or `_all`. +- **`active_only` (Optional, boolean)**: If `true`, the response only includes ongoing shard recoveries. +- **`detailed` (Optional, boolean)**: If `true`, the response includes detailed information about shard recoveries. +## client.indices.refresh [_indices.refresh] +Refresh an index. +A refresh makes recent operations performed on one or more indices available for search. +For data streams, the API runs the refresh operation on the stream’s backing indices. +By default, Elasticsearch periodically refreshes indices every second, but only on indices that have received one search request or more in the last 30 seconds. +You can change this default interval with the `index.refresh_interval` setting. -### delete [_delete_5] +Refresh requests are synchronous and do not return a response until the refresh operation completes. -Delete indices. Deleting an index deletes its documents, shards, and metadata. It does not delete related Kibana components, such as data views, visualizations, or dashboards. +Refreshes are resource-intensive. +To ensure good cluster performance, it's recommended to wait for Elasticsearch's periodic refresh rather than performing an explicit refresh when possible. -You cannot delete the current write index of a data stream. To delete the index, you must roll over the data stream so a new write index is created. You can then use the delete index API to delete the previous write index. +If your application workflow indexes documents and then runs a search to retrieve the indexed document, it's recommended to use the index API's `refresh=wait_for` query parameter option. +This option ensures the indexing operation waits for a periodic refresh before running the search. -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-delete) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-indices-refresh) ```ts -client.indices.delete({ index }) +client.indices.refresh({ ... 
}) ``` +### Arguments [_arguments_indices.refresh] -### Arguments [_arguments_172] - -* **Request (object):** +#### Request (object) [_request_indices.refresh] +- **`index` (Optional, string | string[])**: List of data streams, indices, and aliases used to limit the request. +Supports wildcards (`*`). +To target all data streams and indices, omit this parameter or use `*` or `_all`. +- **`allow_no_indices` (Optional, boolean)**: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. +This behavior applies even if the request targets other open indices. +- **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Type of index that wildcard patterns can match. +If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. +Supports a list of values, such as `open,hidden`. +Valid values are: `all`, `open`, `closed`, `hidden`, `none`. +- **`ignore_unavailable` (Optional, boolean)**: If `false`, the request returns an error if it targets a missing or closed index. - * **`index` (string | string[])**: List of indices to delete. You cannot specify index aliases. By default, this parameter does not support wildcards (`*`) or `_all`. To use wildcards or `_all`, set the `action.destructive_requires_name` cluster setting to `false`. - * **`allow_no_indices` (Optional, boolean)**: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. - * **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports a list of values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. - * **`ignore_unavailable` (Optional, boolean)**: If `false`, the request returns an error if it targets a missing or closed index. - * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. - * **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. +## client.indices.reloadSearchAnalyzers [_indices.reload_search_analyzers] +Reload search analyzers. +Reload an index's search analyzers and their resources. +For data streams, the API reloads search analyzers and resources for the stream's backing indices. +IMPORTANT: After reloading the search analyzers you should clear the request cache to make sure it doesn't contain responses derived from the previous versions of the analyzer. +You can use the reload search analyzers API to pick up changes to synonym files used in the `synonym_graph` or `synonym` token filter of a search analyzer. +To be eligible, the token filter must have an `updateable` flag of `true` and only be used in search analyzers. -### delete_alias [_delete_alias] +NOTE: This API does not perform a reload for each shard of an index. +Instead, it performs a reload for each node containing index shards. 
+As a result, the total shard count returned by the API can differ from the number of index shards. +Because reloading affects every node with an index shard, it is important to update the synonym file on every data node in the cluster--including nodes that don't contain a shard replica--before using this API. +This ensures the synonym file is updated everywhere in the cluster in case shards are relocated in the future. -Delete an alias. Removes a data stream or index from an alias. - -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-delete-alias) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-indices-reload-search-analyzers) ```ts -client.indices.deleteAlias({ index, name }) +client.indices.reloadSearchAnalyzers({ index }) ``` +### Arguments [_arguments_indices.reload_search_analyzers] -### Arguments [_arguments_173] - -* **Request (object):** +#### Request (object) [_request_indices.reload_search_analyzers] +- **`index` (string | string[])**: A list of index names to reload analyzers for +- **`allow_no_indices` (Optional, boolean)**: Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified) +- **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Whether to expand wildcard expression to concrete indices that are open, closed or both. +- **`ignore_unavailable` (Optional, boolean)**: Whether specified concrete indices should be ignored when unavailable (missing or closed) +- **`resource` (Optional, string)**: Changed resource to reload analyzers from if applicable - * **`index` (string | string[])**: List of data streams or indices used to limit the request. Supports wildcards (`*`). - * **`name` (string | string[])**: List of aliases to remove. Supports wildcards (`*`). To remove all aliases, use `*` or `_all`. - * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. - * **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. +## client.indices.resolveCluster [_indices.resolve_cluster] +Resolve the cluster. +Resolve the specified index expressions to return information about each cluster, including the local "querying" cluster, if included. +If no index expression is provided, the API will return information about all the remote clusters that are configured on the querying cluster. +This endpoint is useful before doing a cross-cluster search in order to determine which remote clusters should be included in a search. -### delete_data_lifecycle [_delete_data_lifecycle] - -Delete data stream lifecycles. Removes the data stream lifecycle from a data stream, rendering it not managed by the data stream lifecycle. +You use the same index expression with this endpoint as you would for cross-cluster search. +Index and cluster exclusions are also supported with this endpoint. 
-[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-delete-data-lifecycle)

+For each cluster in the index expression, information is returned about:

-```ts
-client.indices.deleteDataLifecycle({ name })
-```

+* Whether the querying ("local") cluster is currently connected to each remote cluster specified in the index expression. Note that this endpoint actively attempts to contact the remote clusters, unlike the `remote/info` endpoint.
+* Whether each remote cluster is configured with `skip_unavailable` as `true` or `false`.
+* Whether there are any indices, aliases, or data streams on that cluster that match the index expression.
+* Whether the search is likely to have errors returned when you do the cross-cluster search (including any authorization errors if you do not have permission to query the index).
+* Cluster version information, including the Elasticsearch server version.

+For example, `GET /_resolve/cluster/my-index-*,cluster*:my-index-*` returns information about the local cluster and all remotely configured clusters that start with the alias `cluster*`.
+Each cluster returns information about whether it has any indices, aliases or data streams that match `my-index-*`.

-### Arguments [_arguments_174]

+## Note on backwards compatibility
+The ability to query without an index expression was added in version 8.18, so when
+querying remote clusters older than that, the local cluster will send the index
+expression `dummy*` to those remote clusters. Thus, if an error occurs, you may see a reference
+to that index expression even though you didn't request it. If it causes a problem, you can
+instead include an index expression like `*:*` to bypass the issue.

-* **Request (object):**

+## Advantages of using this endpoint before a cross-cluster search

- * **`name` (string | string[])**: A list of data streams of which the data stream lifecycle will be deleted; use `*` to get all data streams
- * **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Whether wildcard expressions should get expanded to open or closed indices (default: open)
- * **`master_timeout` (Optional, string | -1 | 0)**: Specify timeout for connection to master
- * **`timeout` (Optional, string | -1 | 0)**: Explicit timestamp for the document

+You may want to exclude a cluster or index from a search when:
+* A remote cluster is not currently connected and is configured with `skip_unavailable=false`. Running a cross-cluster search under those conditions will cause the entire search to fail.
+* A cluster has no matching indices, aliases or data streams for the index expression (or your user does not have permissions to search them). For example, suppose your index expression is `logs*,remote1:logs*` and the remote1 cluster has no indices, aliases or data streams that match `logs*`. In that case, that cluster will return no results if you include it in a cross-cluster search.
+* The index expression (combined with any query parameters you specify) will likely cause an exception to be thrown when you do the search. In these cases, the "error" field in the `_resolve/cluster` response will be present. (This is also where security/permission errors will be shown.)
+* A remote cluster is an older version that does not support the feature you want to use in your search.
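+As a rough sketch of that pre-flight check with the JavaScript client (assuming `client` is an already-configured `Client` from `@elastic/elasticsearch`; the `logs-*` pattern, the `cluster*` aliases, and the `(local)` response key used below are illustrative, not prescribed by this API), you could resolve the expression first and search only the clusters that are connected and error-free:

+```ts
+// Sketch only: index patterns and cluster aliases are example values.
+const resolved = await client.indices.resolveCluster({ name: 'logs-*,cluster*:logs-*' })
+const targets = Object.entries(resolved)
+  // keep clusters that are connected and did not report an error for this expression
+  .filter(([, info]) => info.connected !== false && info.error == null)
+  .map(([cluster]) => (cluster === '(local)' ? 'logs-*' : `${cluster}:logs-*`))
+if (targets.length > 0) {
+  const result = await client.search({ index: targets.join(','), query: { match_all: {} } })
+  console.log(result.hits.hits)
+}
+```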
+## Test availability of remote clusters

-### delete_data_stream [_delete_data_stream]

+The `remote/info` endpoint is commonly used to test whether the "local" cluster (the cluster being queried) is connected to its remote clusters, but it does not necessarily reflect whether the remote cluster is available or not.
+The remote cluster may be available, while the local cluster is not currently connected to it.

-Delete data streams. Deletes one or more data streams and their backing indices.

+You can use the `_resolve/cluster` API to attempt to reconnect to remote clusters.
+For example with `GET _resolve/cluster` or `GET _resolve/cluster/*:*`.
+The `connected` field in the response will indicate whether it was successful.
+If a connection was (re-)established, this will also cause the `remote/info` endpoint to now indicate a connected status.

-[Endpoint documentation](docs-content://manage-data/data-store/data-streams.md)

+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-indices-resolve-cluster)

```ts
-client.indices.deleteDataStream({ name })
+client.indices.resolveCluster({ ... })
```

+### Arguments [_arguments_indices.resolve_cluster]

-### Arguments [_arguments_175]
-
-* **Request (object):**
-
- * **`name` (string | string[])**: List of data streams to delete. Wildcard (`*`) expressions are supported.
- * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.
- * **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Type of data stream that wildcard patterns can match. Supports a list of values,such as `open,hidden`.
-
-
+#### Request (object) [_request_indices.resolve_cluster]
+- **`name` (Optional, string | string[])**: A list of names or index patterns for the indices, aliases, and data streams to resolve.
+Resources on remote clusters can be specified using the `<cluster>`:`<name>` syntax.
+Index and cluster exclusions (e.g., `-cluster1:*`) are also supported.
+If no index expression is specified, information about all remote clusters configured on the local cluster
+is returned without doing any index matching.
+- **`allow_no_indices` (Optional, boolean)**: If false, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing
+or closed indices. This behavior applies even if the request targets other open indices. For example, a request
+targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`.
+NOTE: This option is only supported when specifying an index expression. You will get an error if you specify index
+options to the `_resolve/cluster` API endpoint that takes no index expression.
+- **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Type of index that wildcard patterns can match.
+If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams.
+Supports a list of values, such as `open,hidden`.
+Valid values are: `all`, `open`, `closed`, `hidden`, `none`.
+NOTE: This option is only supported when specifying an index expression. You will get an error if you specify index
+options to the `_resolve/cluster` API endpoint that takes no index expression.
+- **`ignore_throttled` (Optional, boolean)**: If true, concrete, expanded, or aliased indices are ignored when frozen.
+NOTE: This option is only supported when specifying an index expression. You will get an error if you specify index
+options to the `_resolve/cluster` API endpoint that takes no index expression.
+- **`ignore_unavailable` (Optional, boolean)**: If false, the request returns an error if it targets a missing or closed index.
+NOTE: This option is only supported when specifying an index expression. You will get an error if you specify index
+options to the `_resolve/cluster` API endpoint that takes no index expression.
+- **`timeout` (Optional, string | -1 | 0)**: The maximum time to wait for remote clusters to respond.
+If a remote cluster does not respond within this timeout period, the API response
+will show the cluster as not connected and include an error message that the
+request timed out.

-### delete_index_template [_delete_index_template]

+The default timeout is unset and the query can take
+as long as the networking layer is configured to wait for remote clusters that are
+not responding (typically 30 seconds).

-Delete an index template. The provided may contain multiple template names separated by a comma. If multiple template names are specified then there is no wildcard support and the provided names should match completely with existing templates.

+## client.indices.resolveIndex [_indices.resolve_index]
+Resolve indices.
+Resolve the names and/or index patterns for indices, aliases, and data streams.
+Multiple patterns and remote clusters are supported.

-[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-delete-index-template)

+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-indices-resolve-index)

```ts
-client.indices.deleteIndexTemplate({ name })
+client.indices.resolveIndex({ name })
```

+### Arguments [_arguments_indices.resolve_index]

-### Arguments [_arguments_176]

-* **Request (object):**

- * **`name` (string | string[])**: List of index template names used to limit the request. Wildcard (*) expressions are supported.
- * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.
- * **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.

+#### Request (object) [_request_indices.resolve_index]
+- **`name` (string | string[])**: Comma-separated name(s) or index pattern(s) of the indices, aliases, and data streams to resolve.
+Resources on remote clusters can be specified using the `<cluster>`:`<name>` syntax.
+- **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Type of index that wildcard patterns can match.
+If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams.
+Supports a list of values, such as `open,hidden`.
+Valid values are: `all`, `open`, `closed`, `hidden`, `none`.
+- **`ignore_unavailable` (Optional, boolean)**: If `false`, the request returns an error if it targets a missing or closed index.
+- **`allow_no_indices` (Optional, boolean)**: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices.
+This behavior applies even if the request targets other open indices.
+For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`.

+## client.indices.rollover [_indices.rollover]
+Roll over to a new index.
+TIP: It is recommended to use the index lifecycle rollover action to automate rollovers.
+The rollover API creates a new index for a data stream or index alias.
+The API behavior depends on the rollover target.

-### delete_template [_delete_template]
-
-Delete a legacy index template.

+**Roll over a data stream**

-[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-delete-template)

+If you roll over a data stream, the API creates a new write index for the stream.
+The stream's previous write index becomes a regular backing index.
+A rollover also increments the data stream's generation.

-```ts
-client.indices.deleteTemplate({ name })
-```

+**Roll over an index alias with a write index**
+TIP: Prior to Elasticsearch 7.9, you'd typically use an index alias with a write index to manage time series data.
+Data streams replace this functionality, require less maintenance, and automatically integrate with data tiers.

-### Arguments [_arguments_177]

+If an index alias points to multiple indices, one of the indices must be a write index.
+The rollover API creates a new write index for the alias with `is_write_index` set to `true`.
+The API also sets `is_write_index` to `false` for the previous write index.

-* **Request (object):**

+**Roll over an index alias with one index**

- * **`name` (string)**: The name of the legacy index template to delete. Wildcard (`*`) expressions are supported.
- * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.
- * **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.

+If you roll over an index alias that points to only one index, the API creates a new index for the alias and removes the original index from the alias.
+NOTE: A rollover creates a new index and is subject to the `wait_for_active_shards` setting.

+**Increment index names for an alias**

-### disk_usage [_disk_usage]

+When you roll over an index alias, you can specify a name for the new index.
+If you don't specify a name and the current index ends with `-` and a number, such as `my-index-000001` or `my-index-3`, the new index name increments that number.
+For example, if you roll over an alias with a current index of `my-index-000001`, the rollover creates a new index named `my-index-000002`.
+This number is always six characters and zero-padded, regardless of the previous index's name.

-Analyze the index disk usage. Analyze the disk usage of each field of an index or data stream. This API might not support indices created in previous Elasticsearch versions. The result of a small index can be inaccurate as some parts of an index might not be analyzed by the API.

+If you use an index alias for time series data, you can use date math in the index name to track the rollover date.
+For example, you can create an alias that points to an index named `<my-index-{now/d}-000001>`.
+If you create the index on May 6, 2099, the index's name is `my-index-2099.05.06-000001`.
+If you roll over the alias on May 7, 2099, the new index's name is `my-index-2099.05.07-000002`.

-::::{note}
-The total size of fields of the analyzed shards of the index in the response is usually smaller than the index `store_size` value because some small metadata files are ignored and some parts of data files might not be scanned by the API. Since stored fields are stored together in a compressed format, the sizes of stored fields are also estimates and can be inaccurate. The stored size of the `_id` field is likely underestimated while the `_source` field is overestimated.
-::::

+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-indices-rollover)

+```ts
+client.indices.rollover({ alias })
+```

-[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-disk-usage)

+### Arguments [_arguments_indices.rollover]
+
+#### Request (object) [_request_indices.rollover]
+- **`alias` (string)**: Name of the data stream or index alias to roll over.
+- **`new_index` (Optional, string)**: Name of the index to create.
+Supports date math.
+Data streams do not support this parameter.
+- **`aliases` (Optional, Record)**: Aliases for the target index.
+Data streams do not support this parameter.
+- **`conditions` (Optional, { min_age, max_age, max_age_millis, min_docs, max_docs, max_size, max_size_bytes, min_size, min_size_bytes, max_primary_shard_size, max_primary_shard_size_bytes, min_primary_shard_size, min_primary_shard_size_bytes, max_primary_shard_docs, min_primary_shard_docs })**: Conditions for the rollover.
+If specified, Elasticsearch only performs the rollover if the current index satisfies these conditions.
+If this parameter is not specified, Elasticsearch performs the rollover unconditionally.
+If conditions are specified, at least one of them must be a `max_*` condition.
+The index will roll over if any `max_*` condition is satisfied and all `min_*` conditions are satisfied.
+- **`mappings` (Optional, { all_field, date_detection, dynamic, dynamic_date_formats, dynamic_templates, _field_names, index_field, _meta, numeric_detection, properties, _routing, _size, _source, runtime, enabled, subobjects, _data_stream_timestamp })**: Mapping for fields in the index.
+If specified, this mapping can include field names, field data types, and mapping parameters.
+- **`settings` (Optional, Record)**: Configuration options for the index.
+Data streams do not support this parameter.
+- **`dry_run` (Optional, boolean)**: If `true`, checks whether the current index satisfies the specified conditions but does not perform a rollover.
+- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node.
+If no response is received before the timeout expires, the request fails and returns an error.
+- **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response.
+If no response is received before the timeout expires, the request fails and returns an error.
+- **`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))**: The number of shard copies that must be active before proceeding with the operation.
+Set to all or any positive integer up to the total number of shards in the index (`number_of_replicas+1`).
+- **`lazy` (Optional, boolean)**: If set to true, the rollover action will only mark a data stream to signal that it needs to be rolled over at the next write. +Only allowed on data streams. + +## client.indices.segments [_indices.segments] +Get index segments. +Get low-level information about the Lucene segments in index shards. +For data streams, the API returns information about the stream's backing indices. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-indices-segments) ```ts -client.indices.diskUsage({ index }) +client.indices.segments({ ... }) ``` +### Arguments [_arguments_indices.segments] -### Arguments [_arguments_178] +#### Request (object) [_request_indices.segments] +- **`index` (Optional, string | string[])**: List of data streams, indices, and aliases used to limit the request. +Supports wildcards (`*`). +To target all data streams and indices, omit this parameter or use `*` or `_all`. +- **`allow_no_indices` (Optional, boolean)**: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. +This behavior applies even if the request targets other open indices. +- **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Type of index that wildcard patterns can match. +If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. +Supports a list of values, such as `open,hidden`. +Valid values are: `all`, `open`, `closed`, `hidden`, `none`. +- **`ignore_unavailable` (Optional, boolean)**: If `false`, the request returns an error if it targets a missing or closed index. -* **Request (object):** +## client.indices.shardStores [_indices.shard_stores] +Get index shard stores. +Get store information about replica shards in one or more indices. +For data streams, the API retrieves store information for the stream's backing indices. - * **`index` (string | string[])**: List of data streams, indices, and aliases used to limit the request. It’s recommended to execute this API with a single index (or the latest backing index of a data stream) as the API consumes resources significantly. - * **`allow_no_indices` (Optional, boolean)**: If false, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. - * **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports a list of values, such as `open,hidden`. - * **`flush` (Optional, boolean)**: If `true`, the API performs a flush before analysis. If `false`, the response may not include uncommitted data. - * **`ignore_unavailable` (Optional, boolean)**: If `true`, missing or closed indices are not included in the response. - * **`run_expensive_tasks` (Optional, boolean)**: Analyzing field disk usage is resource-intensive. To use the API, this parameter must be set to `true`. 
+The index shard stores API returns the following information: +* The node on which each replica shard exists. +* The allocation ID for each replica shard. +* A unique ID for each replica shard. +* Any errors encountered while opening the shard index or from an earlier failure. +By default, the API returns store information only for primary shards that are unassigned or have one or more unassigned replica shards. -### downsample [_downsample] +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-indices-shard-stores) -Downsample an index. Aggregate a time series (TSDS) index and store pre-computed statistical summaries (`min`, `max`, `sum`, `value_count` and `avg`) for each metric field grouped by a configured time interval. For example, a TSDS index that contains metrics sampled every 10 seconds can be downsampled to an hourly index. All documents within an hour interval are summarized and stored as a single document in the downsample index. +```ts +client.indices.shardStores({ ... }) +``` -::::{note} -Only indices in a time series data stream are supported. Neither field nor document level security can be defined on the source index. The source index must be read only (`index.blocks.write: true`). -:::: +### Arguments [_arguments_indices.shard_stores] +#### Request (object) [_request_indices.shard_stores] +- **`index` (Optional, string | string[])**: List of data streams, indices, and aliases used to limit the request. +- **`allow_no_indices` (Optional, boolean)**: If false, the request returns an error if any wildcard expression, index alias, or _all +value targets only missing or closed indices. This behavior applies even if the request +targets other open indices. +- **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Type of index that wildcard patterns can match. If the request can target data streams, +this argument determines whether wildcard expressions match hidden data streams. +- **`ignore_unavailable` (Optional, boolean)**: If true, missing or closed indices are not included in the response. +- **`status` (Optional, Enum("green" | "yellow" | "red" | "all") | Enum("green" | "yellow" | "red" | "all")[])**: List of shard health statuses used to limit the request. -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-downsample) +## client.indices.shrink [_indices.shrink] +Shrink an index. +Shrink an index into a new index with fewer primary shards. -```ts -client.indices.downsample({ index, target_index }) -``` +Before you can shrink an index: +* The index must be read-only. +* A copy of every shard in the index must reside on the same node. +* The index must have a green health status. -### Arguments [_arguments_179] +To make shard allocation easier, we recommend you also remove the index's replica shards. +You can later re-add replica shards as part of the shrink operation. -* **Request (object):** +The requested number of primary shards in the target index must be a factor of the number of shards in the source index. +For example an index with 8 primary shards can be shrunk into 4, 2 or 1 primary shards or an index with 15 primary shards can be shrunk into 5, 3 or 1. +If the number of shards in the index is a prime number it can only be shrunk into a single primary shard + Before shrinking, a (primary or replica) copy of every shard in the index must be present on the same node. 
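+As a minimal sketch of that preparation with the JavaScript client (assuming `client` is an already-configured `Client` from `@elastic/elasticsearch`; the index name, node name, and shard count below are hypothetical), you might block writes, co-locate a copy of every shard on one node, and then shrink; the full parameter list for the shrink request is described further below:

+```ts
+// Sketch only: index name, node name, and shard counts are example values.
+await client.indices.putSettings({
+  index: 'my-source-index',
+  settings: {
+    'index.number_of_replicas': 0,                              // remove replicas to make co-location easier
+    'index.routing.allocation.require._name': 'shrink-node-1',  // place a copy of every shard on one node
+    'index.blocks.write': true                                   // block writes; the index stays readable
+  }
+})
+await client.indices.shrink({
+  index: 'my-source-index',
+  target: 'my-shrunken-index',
+  settings: { 'index.number_of_shards': 1 } // must be a factor of the source's primary shard count
+})
+```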
- * **`index` (string)**: Name of the time series index to downsample. - * **`target_index` (string)**: Name of the index to create. - * **`config` (Optional, { fixed_interval })** +The current write index on a data stream cannot be shrunk. In order to shrink the current write index, the data stream must first be rolled over so that a new write index is created and then the previous write index can be shrunk. +A shrink operation: +* Creates a new target index with the same definition as the source index, but with a smaller number of primary shards. +* Hard-links segments from the source index into the target index. If the file system does not support hard-linking, then all segments are copied into the new index, which is a much more time consuming process. Also if using multiple data paths, shards on different data paths require a full copy of segment files if they are not on the same disk since hardlinks do not work across disks. +* Recovers the target index as though it were a closed index which had just been re-opened. Recovers shards to the `.routing.allocation.initial_recovery._id` index setting. -### exists [_exists_2] +IMPORTANT: Indices can only be shrunk if they satisfy the following requirements: -Check indices. Check if one or more indices, index aliases, or data streams exist. +* The target index must not exist. +* The source index must have more primary shards than the target index. +* The number of primary shards in the target index must be a factor of the number of primary shards in the source index. The source index must have more primary shards than the target index. +* The index must not contain more than 2,147,483,519 documents in total across all shards that will be shrunk into a single shard on the target index as this is the maximum number of docs that can fit into a single shard. +* The node handling the shrink process must have sufficient free disk space to accommodate a second copy of the existing index. -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-exists) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-indices-shrink) ```ts -client.indices.exists({ index }) +client.indices.shrink({ index, target }) ``` +### Arguments [_arguments_indices.shrink] -### Arguments [_arguments_180] +#### Request (object) [_request_indices.shrink] +- **`index` (string)**: Name of the source index to shrink. +- **`target` (string)**: Name of the target index to create. +- **`aliases` (Optional, Record)**: The key is the alias name. +Index alias names support date math. +- **`settings` (Optional, Record)**: Configuration options for the target index. +- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. +- **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. +If no response is received before the timeout expires, the request fails and returns an error. +- **`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))**: The number of shard copies that must be active before proceeding with the operation. +Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). -* **Request (object):** +## client.indices.simulateIndexTemplate [_indices.simulate_index_template] +Simulate an index. 
+Get the index configuration that would be applied to the specified index from an existing index template. - * **`index` (string | string[])**: List of data streams, indices, and aliases. Supports wildcards (`*`). - * **`allow_no_indices` (Optional, boolean)**: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. - * **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports a list of values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. - * **`flat_settings` (Optional, boolean)**: If `true`, returns settings in flat format. - * **`ignore_unavailable` (Optional, boolean)**: If `false`, the request returns an error if it targets a missing or closed index. - * **`include_defaults` (Optional, boolean)**: If `true`, return all default settings in the response. - * **`local` (Optional, boolean)**: If `true`, the request retrieves information from the local node only. +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-indices-simulate-index-template) +```ts +client.indices.simulateIndexTemplate({ name }) +``` +### Arguments [_arguments_indices.simulate_index_template] -### exists_alias [_exists_alias] +#### Request (object) [_request_indices.simulate_index_template] +- **`name` (string)**: Name of the index to simulate +- **`create` (Optional, boolean)**: Whether the index template we optionally defined in the body should only be dry-run added if new or can also replace an existing one +- **`cause` (Optional, string)**: User defined reason for dry-run creating the new template for simulation purposes +- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. +- **`include_defaults` (Optional, boolean)**: If true, returns all relevant default configurations for the index template. -Check aliases. Checks if one or more data stream or index aliases exist. +## client.indices.simulateTemplate [_indices.simulate_template] +Simulate an index template. +Get the index configuration that would be applied by a particular index template. -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-update-aliases) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-indices-simulate-template) ```ts -client.indices.existsAlias({ name }) +client.indices.simulateTemplate({ ... }) ``` +### Arguments [_arguments_indices.simulate_template] + +#### Request (object) [_request_indices.simulate_template] +- **`name` (Optional, string)**: Name of the index template to simulate. To test a template configuration before you add it to the cluster, omit +this parameter and specify the template configuration in the request body. +- **`allow_auto_create` (Optional, boolean)**: This setting overrides the value of the `action.auto_create_index` cluster setting. 
+If set to `true` in a template, then indices can be automatically created using that template even if auto-creation of indices is disabled via `actions.auto_create_index`.
+If set to `false`, then indices or data streams matching the template must always be explicitly created, and may never be automatically created.
+- **`index_patterns` (Optional, string | string[])**: Array of wildcard (`*`) expressions used to match the names of data streams and indices during creation.
+- **`composed_of` (Optional, string[])**: An ordered list of component template names.
+Component templates are merged in the order specified, meaning that the last component template specified has the highest precedence.
+- **`template` (Optional, { aliases, mappings, settings, lifecycle })**: Template to be applied.
+It may optionally include an `aliases`, `mappings`, or `settings` configuration.
+- **`data_stream` (Optional, { hidden, allow_custom_routing })**: If this object is included, the template is used to create data streams and their backing indices.
+Supports an empty object.
+Data streams require a matching index template with a `data_stream` object.
+- **`priority` (Optional, number)**: Priority to determine index template precedence when a new data stream or index is created.
+The index template with the highest priority is chosen.
+If no priority is specified the template is treated as though it is of priority 0 (lowest priority).
+This number is not automatically generated by Elasticsearch.
+- **`version` (Optional, number)**: Version number used to manage index templates externally.
+This number is not automatically generated by Elasticsearch.
+- **`_meta` (Optional, Record)**: Optional user metadata about the index template.
+May have any contents.
+This map is not automatically generated by Elasticsearch.
+- **`ignore_missing_component_templates` (Optional, string[])**: The configuration option ignore_missing_component_templates can be used when an index template
+references a component template that might not exist
+- **`deprecated` (Optional, boolean)**: Marks this index template as deprecated. When creating or updating a non-deprecated index template
+that uses deprecated components, Elasticsearch will emit a deprecation warning.
+- **`create` (Optional, boolean)**: If true, the template passed in the body is only used if no existing templates match the same index patterns. If false, the simulation uses the template with the highest priority. Note that the template is not permanently added or updated in either case; it is only used for the simulation.
+- **`cause` (Optional, string)**: User defined reason for dry-run creating the new template for simulation purposes
+- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.
+- **`include_defaults` (Optional, boolean)**: If true, returns all relevant default configurations for the index template.
+
+## client.indices.split [_indices.split]
+Split an index.
+Split an index into a new index with more primary shards.

+Before you can split an index:
+
+* The index must be read-only.
+* The cluster health status must be green.
+
+You can make an index read-only with the following request using the add index block API:

-### Arguments [_arguments_181]

+```
+PUT /my_source_index/_block/write
+```

-* **Request (object):**

+The current write index on a data stream cannot be split.
+In order to split the current write index, the data stream must first be rolled over so that a new write index is created and then the previous write index can be split. - * **`name` (string | string[])**: List of aliases to check. Supports wildcards (`*`). - * **`index` (Optional, string | string[])**: List of data streams or indices used to limit the request. Supports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`. - * **`allow_no_indices` (Optional, boolean)**: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. - * **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports a list of values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. - * **`ignore_unavailable` (Optional, boolean)**: If `false`, requests that include a missing data stream or index in the target indices or data streams return an error. - * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. +The number of times the index can be split (and the number of shards that each original shard can be split into) is determined by the `index.number_of_routing_shards` setting. +The number of routing shards specifies the hashing space that is used internally to distribute documents across shards with consistent hashing. +For instance, a 5 shard index with `number_of_routing_shards` set to 30 (5 x 2 x 3) could be split by a factor of 2 or 3. +A split operation: +* Creates a new target index with the same definition as the source index, but with a larger number of primary shards. +* Hard-links segments from the source index into the target index. If the file system doesn't support hard-linking, all segments are copied into the new index, which is a much more time consuming process. +* Hashes all documents again, after low level files are created, to delete documents that belong to a different shard. +* Recovers the target index as though it were a closed index which had just been re-opened. -### exists_index_template [_exists_index_template] +IMPORTANT: Indices can only be split if they satisfy the following requirements: -Check index templates. Check whether index templates exist. +* The target index must not exist. +* The source index must have fewer primary shards than the target index. +* The number of primary shards in the target index must be a multiple of the number of primary shards in the source index. +* The node handling the split process must have sufficient free disk space to accommodate a second copy of the existing index. 
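+For illustration, a minimal split call could look like the following sketch. It assumes a configured `client` instance and the `my_source_index` index shown above; the target index name and shard count are assumptions, not recommendations:
+
+```ts
+// Sketch: split a read-only source index into a target index with more primary shards.
+const resp = await client.indices.split({
+  index: 'my_source_index',      // source index; must be read-only and the cluster health must be green
+  target: 'my_target_index',     // hypothetical target index to create
+  settings: {
+    'index.number_of_shards': 2  // must be a multiple of the source index's primary shard count
+  }
+})
+console.log(resp.acknowledged)
+```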
-[Endpoint documentation](docs-content://manage-data/data-store/templates.md) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-indices-split) ```ts -client.indices.existsIndexTemplate({ name }) +client.indices.split({ index, target }) ``` +### Arguments [_arguments_indices.split] -### Arguments [_arguments_182] +#### Request (object) [_request_indices.split] +- **`index` (string)**: Name of the source index to split. +- **`target` (string)**: Name of the target index to create. +- **`aliases` (Optional, Record)**: Aliases for the resulting index. +- **`settings` (Optional, Record)**: Configuration options for the target index. +- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. +- **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. +If no response is received before the timeout expires, the request fails and returns an error. +- **`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))**: The number of shard copies that must be active before proceeding with the operation. +Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). -* **Request (object):** +## client.indices.stats [_indices.stats] +Get index statistics. +For data streams, the API retrieves statistics for the stream's backing indices. - * **`name` (string)**: List of index template names used to limit the request. Wildcard (*) expressions are supported. - * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. +By default, the returned statistics are index-level with `primaries` and `total` aggregations. +`primaries` are the values for only the primary shards. +`total` are the accumulated values for both primary and replica shards. + +To get shard-level statistics, set the `level` parameter to `shards`. +NOTE: When moving to another node, the shard-level statistics for a shard are cleared. +Although the shard is no longer part of the node, that node retains any node-level statistics to which the shard contributed. +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-indices-stats) -### exists_template [_exists_template] +```ts +client.indices.stats({ ... }) +``` -Check existence of index templates. Get information about whether index templates exist. Index templates define settings, mappings, and aliases that can be applied automatically to new indices. +### Arguments [_arguments_indices.stats] -::::{important} -This documentation is about legacy index templates, which are deprecated and will be replaced by the composable templates introduced in Elasticsearch 7.8. -:::: +#### Request (object) [_request_indices.stats] +- **`metric` (Optional, string | string[])**: Limit the information returned the specific metrics. +- **`index` (Optional, string | string[])**: A list of index names; use `_all` or empty string to perform the operation on all indices +- **`completion_fields` (Optional, string | string[])**: List or wildcard expressions of fields to include in fielddata and suggest statistics. 
+- **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Type of index that wildcard patterns can match. If the request can target data streams, this argument +determines whether wildcard expressions match hidden data streams. Supports a list of values, +such as `open,hidden`. +- **`fielddata_fields` (Optional, string | string[])**: List or wildcard expressions of fields to include in fielddata statistics. +- **`fields` (Optional, string | string[])**: List or wildcard expressions of fields to include in the statistics. +- **`forbid_closed_indices` (Optional, boolean)**: If true, statistics are not collected from closed indices. +- **`groups` (Optional, string | string[])**: List of search groups to include in the search statistics. +- **`include_segment_file_sizes` (Optional, boolean)**: If true, the call reports the aggregated disk usage of each one of the Lucene index files (only applies if segment stats are requested). +- **`include_unloaded_segments` (Optional, boolean)**: If true, the response includes information from segments that are not loaded into memory. +- **`level` (Optional, Enum("cluster" | "indices" | "shards"))**: Indicates whether statistics are aggregated at the cluster, index, or shard level. +## client.indices.updateAliases [_indices.update_aliases] +Create or update an alias. +Adds a data stream or index to an alias. -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-exists-template) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-indices-update-aliases) ```ts -client.indices.existsTemplate({ name }) +client.indices.updateAliases({ ... }) ``` +### Arguments [_arguments_indices.update_aliases] -### Arguments [_arguments_183] +#### Request (object) [_request_indices.update_aliases] +- **`actions` (Optional, { add_backing_index, remove_backing_index }[])**: Actions to perform. +- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. +- **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. +If no response is received before the timeout expires, the request fails and returns an error. -* **Request (object):** +## client.indices.validateQuery [_indices.validate_query] +Validate a query. +Validates a query without running it. - * **`name` (string | string[])**: A list of index template names used to limit the request. Wildcard (`*`) expressions are supported. - * **`flat_settings` (Optional, boolean)**: Indicates whether to use a flat format for the response. - * **`local` (Optional, boolean)**: Indicates whether to get information from the local node only. - * **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for the master node. If the master node is not available before the timeout expires, the request fails and returns an error. To indicate that the request should never timeout, set it to `-1`. +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-indices-validate-query) +```ts +client.indices.validateQuery({ ... 
}) +``` +### Arguments [_arguments_indices.validate_query] -### explain_data_lifecycle [_explain_data_lifecycle] +#### Request (object) [_request_indices.validate_query] +- **`index` (Optional, string | string[])**: List of data streams, indices, and aliases to search. +Supports wildcards (`*`). +To search all data streams or indices, omit this parameter or use `*` or `_all`. +- **`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })**: Query in the Lucene query string syntax. +- **`allow_no_indices` (Optional, boolean)**: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. +This behavior applies even if the request targets other open indices. +- **`all_shards` (Optional, boolean)**: If `true`, the validation is executed on all shards instead of one random shard per index. +- **`analyzer` (Optional, string)**: Analyzer to use for the query string. +This parameter can only be used when the `q` query string parameter is specified. +- **`analyze_wildcard` (Optional, boolean)**: If `true`, wildcard and prefix queries are analyzed. +- **`default_operator` (Optional, Enum("and" | "or"))**: The default operator for query string query: `AND` or `OR`. +- **`df` (Optional, string)**: Field to use as default where no field prefix is given in the query string. +This parameter can only be used when the `q` query string parameter is specified. +- **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Type of index that wildcard patterns can match. +If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. +Supports a list of values, such as `open,hidden`. +Valid values are: `all`, `open`, `closed`, `hidden`, `none`. +- **`explain` (Optional, boolean)**: If `true`, the response returns detailed information if an error has occurred. +- **`ignore_unavailable` (Optional, boolean)**: If `false`, the request returns an error if it targets a missing or closed index. +- **`lenient` (Optional, boolean)**: If `true`, format-based query failures (such as providing text to a numeric field) in the query string will be ignored. +- **`rewrite` (Optional, boolean)**: If `true`, returns a more detailed explanation showing the actual Lucene query that will be executed. +- **`q` (Optional, string)**: Query in the Lucene query string syntax. -Get the status for a data stream lifecycle. Get information about an index or data stream’s current data stream lifecycle status, such as time since index creation, time since rollover, the lifecycle configuration managing the index, or any errors encountered during lifecycle execution. 
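+As an illustration, a query can be checked before it is run. This is a sketch only; it assumes a configured `client` instance, and the index name, field, and value are hypothetical:
+
+```ts
+// Sketch: validate a match query without executing it.
+const resp = await client.indices.validateQuery({
+  index: 'my-index-000001',         // hypothetical index name
+  explain: true,                    // return detailed information if the query is invalid
+  query: {
+    match: { 'user.id': 'kimchy' }  // hypothetical field and value
+  }
+})
+console.log(resp.valid)             // true when the query is valid
+```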
+## client.inference.chatCompletionUnified [_inference.chat_completion_unified] +Perform chat completion inference -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-explain-data-lifecycle) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-inference-unified-inference) ```ts -client.indices.explainDataLifecycle({ index }) +client.inference.chatCompletionUnified({ inference_id }) ``` +### Arguments [_arguments_inference.chat_completion_unified] -### Arguments [_arguments_184] - -* **Request (object):** +#### Request (object) [_request_inference.chat_completion_unified] +- **`inference_id` (string)**: The inference Id +- **`chat_completion_request` (Optional, { messages, model, max_completion_tokens, stop, temperature, tool_choice, tools, top_p })** +- **`timeout` (Optional, string | -1 | 0)**: Specifies the amount of time to wait for the inference request to complete. - * **`index` (string | string[])**: The name of the index to explain - * **`include_defaults` (Optional, boolean)**: indicates if the API should return the default values the system uses for the index’s lifecycle - * **`master_timeout` (Optional, string | -1 | 0)**: Specify timeout for connection to master +## client.inference.completion [_inference.completion] +Perform completion inference on the service +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-inference-inference) +```ts +client.inference.completion({ inference_id, input }) +``` -### field_usage_stats [_field_usage_stats] +### Arguments [_arguments_inference.completion] -Get field usage stats. Get field usage information for each shard and field of an index. Field usage statistics are automatically captured when queries are running on a cluster. A shard-level search request that accesses a given field, even if multiple times during that request, is counted as a single use. +#### Request (object) [_request_inference.completion] +- **`inference_id` (string)**: The inference Id +- **`input` (string | string[])**: Inference input. +Either a string or an array of strings. +- **`task_settings` (Optional, User-defined value)**: Optional task settings +- **`timeout` (Optional, string | -1 | 0)**: Specifies the amount of time to wait for the inference request to complete. -The response body reports the per-shard usage count of the data structures that back the fields in the index. A given request will increment each count by a maximum value of 1, even if the request accesses the same field multiple times. +## client.inference.delete [_inference.delete] +Delete an inference endpoint -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-field-usage-stats) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-inference-delete) ```ts -client.indices.fieldUsageStats({ index }) +client.inference.delete({ inference_id }) ``` +### Arguments [_arguments_inference.delete] + +#### Request (object) [_request_inference.delete] +- **`inference_id` (string)**: The inference identifier. +- **`task_type` (Optional, Enum("sparse_embedding" | "text_embedding" | "rerank" | "completion" | "chat_completion"))**: The task type +- **`dry_run` (Optional, boolean)**: When true, the endpoint is not deleted and a list of ingest processors which reference this endpoint is returned. 
+- **`force` (Optional, boolean)**: When true, the inference endpoint is forcefully deleted even if it is still being used by ingest processors or semantic text fields. -### Arguments [_arguments_185] +## client.inference.get [_inference.get] +Get an inference endpoint -* **Request (object):** +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-inference-get) - * **`index` (string | string[])**: List or wildcard expression of index names used to limit the request. - * **`allow_no_indices` (Optional, boolean)**: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. - * **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports a list of values, such as `open,hidden`. - * **`ignore_unavailable` (Optional, boolean)**: If `true`, missing or closed indices are not included in the response. - * **`fields` (Optional, string | string[])**: List or wildcard expressions of fields to include in the statistics. - * **`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))**: The number of shard copies that must be active before proceeding with the operation. Set to all or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). +```ts +client.inference.get({ ... }) +``` +### Arguments [_arguments_inference.get] +#### Request (object) [_request_inference.get] +- **`task_type` (Optional, Enum("sparse_embedding" | "text_embedding" | "rerank" | "completion" | "chat_completion"))**: The task type +- **`inference_id` (Optional, string)**: The inference Id -### flush [_flush] +## client.inference.inference [_inference.inference] +Perform inference on the service. -Flush data streams or indices. Flushing a data stream or index is the process of making sure that any data that is currently only stored in the transaction log is also permanently stored in the Lucene index. When restarting, Elasticsearch replays any unflushed operations from the transaction log into the Lucene index to bring it back into the state that it was in before the restart. Elasticsearch automatically triggers flushes as needed, using heuristics that trade off the size of the unflushed transaction log against the cost of performing each flush. +This API enables you to use machine learning models to perform specific tasks on data that you provide as an input. +It returns a response with the results of the tasks. +The inference endpoint you use can perform one specific task that has been defined when the endpoint was created with the create inference API. -After each operation has been flushed it is permanently stored in the Lucene index. This may mean that there is no need to maintain an additional copy of it in the transaction log. The transaction log is made up of multiple files, called generations, and Elasticsearch will delete any generation files when they are no longer needed, freeing up disk space. 
+For details about using this API with a service, such as Amazon Bedrock, Anthropic, or HuggingFace, refer to the service-specific documentation. -It is also possible to trigger a flush on one or more indices using the flush API, although it is rare for users to need to call this API directly. If you call the flush API after indexing some documents then a successful response indicates that Elasticsearch has flushed all the documents that were indexed before the flush API was called. +> info +> The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Azure, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face. For built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models. However, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs. -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-flush) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-inference-inference) ```ts -client.indices.flush({ ... }) +client.inference.inference({ inference_id, input }) ``` +### Arguments [_arguments_inference.inference] -### Arguments [_arguments_186] +#### Request (object) [_request_inference.inference] +- **`inference_id` (string)**: The unique identifier for the inference endpoint. +- **`input` (string | string[])**: The text on which you want to perform the inference task. +It can be a single string or an array. -* **Request (object):** +> info +> Inference endpoints for the `completion` task type currently only support a single string as input. +- **`task_type` (Optional, Enum("sparse_embedding" | "text_embedding" | "rerank" | "completion" | "chat_completion"))**: The type of inference task that the model performs. +- **`query` (Optional, string)**: The query input, which is required only for the `rerank` task. +It is not required for other tasks. +- **`task_settings` (Optional, User-defined value)**: Task settings for the individual inference request. +These settings are specific to the task type you specified and override the task settings specified when initializing the service. +- **`timeout` (Optional, string | -1 | 0)**: The amount of time to wait for the inference request to complete. - * **`index` (Optional, string | string[])**: List of data streams, indices, and aliases to flush. Supports wildcards (`*`). To flush all data streams and indices, omit this parameter or use `*` or `_all`. - * **`allow_no_indices` (Optional, boolean)**: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. - * **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports a list of values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. - * **`force` (Optional, boolean)**: If `true`, the request forces a flush even if there are no changes to commit to the index. 
- * **`ignore_unavailable` (Optional, boolean)**: If `false`, the request returns an error if it targets a missing or closed index. - * **`wait_if_ongoing` (Optional, boolean)**: If `true`, the flush operation blocks until execution when another flush operation is running. If `false`, Elasticsearch returns an error if you request a flush when another flush operation is running. +## client.inference.put [_inference.put] +Create an inference endpoint. +When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. +After creating the endpoint, wait for the model deployment to complete before using it. +To verify the deployment status, use the get trained model statistics API. +Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. +Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources. +IMPORTANT: The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Mistral, Azure OpenAI, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face. +For built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models. +However, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs. +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-inference-put) -### forcemerge [_forcemerge] - -Force a merge. Perform the force merge operation on the shards of one or more indices. For data streams, the API forces a merge on the shards of the stream’s backing indices. +```ts +client.inference.put({ inference_id }) +``` -Merging reduces the number of segments in each shard by merging some of them together and also frees up the space used by deleted documents. Merging normally happens automatically, but sometimes it is useful to trigger a merge manually. +### Arguments [_arguments_inference.put] -::::{warning} -We recommend force merging only a read-only index (meaning the index is no longer receiving writes). When documents are updated or deleted, the old version is not immediately removed but instead soft-deleted and marked with a "tombstone". These soft-deleted documents are automatically cleaned up during regular segment merges. But force merge can cause very large (greater than 5 GB) segments to be produced, which are not eligible for regular merges. So the number of soft-deleted documents can then grow rapidly, resulting in higher disk usage and worse search performance. If you regularly force merge an index receiving writes, this can also make snapshots more expensive, since the new documents can’t be backed up incrementally. -:::: +#### Request (object) [_request_inference.put] +- **`inference_id` (string)**: The inference Id +- **`task_type` (Optional, Enum("sparse_embedding" | "text_embedding" | "rerank" | "completion" | "chat_completion"))**: The task type +- **`inference_config` (Optional, { chunking_settings, service, service_settings, task_settings })** +## client.inference.putAlibabacloud [_inference.put_alibabacloud] +Create an AlibabaCloud AI Search inference endpoint. 
-**Blocks during a force merge** +Create an inference endpoint to perform an inference task with the `alibabacloud-ai-search` service. -Calls to this API block until the merge is complete (unless request contains `wait_for_completion=false`). If the client connection is lost before completion then the force merge process will continue in the background. Any new requests to force merge the same indices will also block until the ongoing force merge is complete. +When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. +After creating the endpoint, wait for the model deployment to complete before using it. +To verify the deployment status, use the get trained model statistics API. +Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. +Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources. -**Running force merge asynchronously** +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-inference-put-alibabacloud) -If the request contains `wait_for_completion=false`, Elasticsearch performs some preflight checks, launches the request, and returns a task you can use to get the status of the task. However, you can not cancel this task as the force merge task is not cancelable. Elasticsearch creates a record of this task as a document at `_tasks/`. When you are done with a task, you should delete the task document so Elasticsearch can reclaim the space. +```ts +client.inference.putAlibabacloud({ task_type, alibabacloud_inference_id, service, service_settings }) +``` -**Force merging multiple indices** +### Arguments [_arguments_inference.put_alibabacloud] -You can force merge multiple indices with a single request by targeting: +#### Request (object) [_request_inference.put_alibabacloud] +- **`task_type` (Enum("completion" | "rerank" | "space_embedding" | "text_embedding"))**: The type of the inference task that the model will perform. +- **`alibabacloud_inference_id` (string)**: The unique identifier of the inference endpoint. +- **`service` (Enum("alibabacloud-ai-search"))**: The type of service supported for the specified task type. In this case, `alibabacloud-ai-search`. +- **`service_settings` ({ api_key, host, rate_limit, service_id, workspace })**: Settings used to install the inference model. These settings are specific to the `alibabacloud-ai-search` service. +- **`chunking_settings` (Optional, { max_chunk_size, overlap, sentence_overlap, strategy })**: The chunking configuration object. +- **`task_settings` (Optional, { input_type, return_token })**: Settings to configure the inference task. +These settings are specific to the task type you specified. -* One or more data streams that contain multiple backing indices -* Multiple indices -* One or more aliases -* All data streams and indices in a cluster +## client.inference.putAmazonbedrock [_inference.put_amazonbedrock] +Create an Amazon Bedrock inference endpoint. -Each targeted shard is force-merged separately using the force_merge threadpool. By default each node only has a single `force_merge` thread which means that the shards on that node are force-merged one at a time. If you expand the `force_merge` threadpool on a node then it will force merge its shards in parallel +Creates an inference endpoint to perform an inference task with the `amazonbedrock` service. 
-Force merge makes the storage for the shard being merged temporarily increase, as it may require free space up to triple its size in case `max_num_segments parameter` is set to `1`, to rewrite all segments into a new one. +>info +> You need to provide the access and secret keys only once, during the inference model creation. The get inference API does not retrieve your access or secret keys. After creating the inference model, you cannot change the associated key pairs. If you want to use a different access and secret key pair, delete the inference model and recreate it with the same name and the updated keys. -**Data streams and time-based indices** +When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. +After creating the endpoint, wait for the model deployment to complete before using it. +To verify the deployment status, use the get trained model statistics API. +Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. +Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources. -Force-merging is useful for managing a data stream’s older backing indices and other time-based indices, particularly after a rollover. In these cases, each index only receives indexing traffic for a certain period of time. Once an index receive no more writes, its shards can be force-merged to a single segment. This can be a good idea because single-segment shards can sometimes use simpler and more efficient data structures to perform searches. For example: +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-inference-put-amazonbedrock) -``` -POST /.ds-my-data-stream-2099.03.07-000001/_forcemerge?max_num_segments=1 +```ts +client.inference.putAmazonbedrock({ task_type, amazonbedrock_inference_id, service, service_settings }) ``` -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-forcemerge) +### Arguments [_arguments_inference.put_amazonbedrock] -```ts -client.indices.forcemerge({ ... }) -``` +#### Request (object) [_request_inference.put_amazonbedrock] +- **`task_type` (Enum("completion" | "text_embedding"))**: The type of the inference task that the model will perform. +- **`amazonbedrock_inference_id` (string)**: The unique identifier of the inference endpoint. +- **`service` (Enum("amazonbedrock"))**: The type of service supported for the specified task type. In this case, `amazonbedrock`. +- **`service_settings` ({ access_key, model, provider, region, rate_limit, secret_key })**: Settings used to install the inference model. These settings are specific to the `amazonbedrock` service. +- **`chunking_settings` (Optional, { max_chunk_size, overlap, sentence_overlap, strategy })**: The chunking configuration object. +- **`task_settings` (Optional, { max_new_tokens, temperature, top_k, top_p })**: Settings to configure the inference task. +These settings are specific to the task type you specified. +## client.inference.putAnthropic [_inference.put_anthropic] +Create an Anthropic inference endpoint. -### Arguments [_arguments_187] +Create an inference endpoint to perform an inference task with the `anthropic` service. -* **Request (object):** +When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. 
+After creating the endpoint, wait for the model deployment to complete before using it.
+To verify the deployment status, use the get trained model statistics API.
+Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`.
+Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
+
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-inference-put-anthropic)
+
+```ts
+client.inference.putAnthropic({ task_type, anthropic_inference_id, service, service_settings })
+```
- * **`index` (Optional, string | string[])**: A list of index names; use `_all` or empty string to perform the operation on all indices
- * **`allow_no_indices` (Optional, boolean)**: Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified)
- * **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Whether to expand wildcard expression to concrete indices that are open, closed or both.
- * **`flush` (Optional, boolean)**: Specify whether the index should be flushed after performing the operation (default: true)
- * **`ignore_unavailable` (Optional, boolean)**: Whether specified concrete indices should be ignored when unavailable (missing or closed)
- * **`max_num_segments` (Optional, number)**: The number of segments the index should be merged into (default: dynamic)
- * **`only_expunge_deletes` (Optional, boolean)**: Specify whether the operation should only expunge deleted documents
- * **`wait_for_completion` (Optional, boolean)**: Should the request wait until the force merge is completed.
+### Arguments [_arguments_inference.put_anthropic]
+#### Request (object) [_request_inference.put_anthropic]
+- **`task_type` (Enum("completion"))**: The task type.
+The only valid task type for the model to perform is `completion`.
+- **`anthropic_inference_id` (string)**: The unique identifier of the inference endpoint.
+- **`service` (Enum("anthropic"))**: The type of service supported for the specified task type. In this case, `anthropic`.
+- **`service_settings` ({ api_key, model_id, rate_limit })**: Settings used to install the inference model. These settings are specific to the `anthropic` service.
+- **`chunking_settings` (Optional, { max_chunk_size, overlap, sentence_overlap, strategy })**: The chunking configuration object.
+- **`task_settings` (Optional, { max_tokens, temperature, top_k, top_p })**: Settings to configure the inference task.
+These settings are specific to the task type you specified.
+## client.inference.putAzureaistudio [_inference.put_azureaistudio]
+Create an Azure AI Studio inference endpoint.
-### get [_get_5]
+Create an inference endpoint to perform an inference task with the `azureaistudio` service.
-Get index information. Get information about one or more indices. For data streams, the API returns information about the stream's backing indices.
+When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running.
+After creating the endpoint, wait for the model deployment to complete before using it.
+To verify the deployment status, use the get trained model statistics API.
+Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. +Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources. -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-inference-put-azureaistudio) ```ts -client.indices.get({ index }) +client.inference.putAzureaistudio({ task_type, azureaistudio_inference_id, service, service_settings }) ``` +### Arguments [_arguments_inference.put_azureaistudio] -### Arguments [_arguments_188] +#### Request (object) [_request_inference.put_azureaistudio] +- **`task_type` (Enum("completion" | "text_embedding"))**: The type of the inference task that the model will perform. +- **`azureaistudio_inference_id` (string)**: The unique identifier of the inference endpoint. +- **`service` (Enum("azureaistudio"))**: The type of service supported for the specified task type. In this case, `azureaistudio`. +- **`service_settings` ({ api_key, endpoint_type, target, provider, rate_limit })**: Settings used to install the inference model. These settings are specific to the `openai` service. +- **`chunking_settings` (Optional, { max_chunk_size, overlap, sentence_overlap, strategy })**: The chunking configuration object. +- **`task_settings` (Optional, { do_sample, max_new_tokens, temperature, top_p, user })**: Settings to configure the inference task. +These settings are specific to the task type you specified. -* **Request (object):** +## client.inference.putAzureopenai [_inference.put_azureopenai] +Create an Azure OpenAI inference endpoint. - * **`index` (string | string[])**: List of data streams, indices, and index aliases used to limit the request. Wildcard expressions (*) are supported. - * **`allow_no_indices` (Optional, boolean)**: If false, the request returns an error if any wildcard expression, index alias, or _all value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting foo*,bar* returns an error if an index starts with foo but no index starts with bar. - * **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Type of index that wildcard expressions can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports a list of values, such as open,hidden. - * **`flat_settings` (Optional, boolean)**: If true, returns settings in flat format. - * **`ignore_unavailable` (Optional, boolean)**: If false, requests that target a missing index return an error. - * **`include_defaults` (Optional, boolean)**: If true, return all default settings in the response. - * **`local` (Optional, boolean)**: If true, the request retrieves information from the local node only. Defaults to false, which means information is retrieved from the master node. - * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. 
- * **`features` (Optional, { name, description } | { name, description }[])**: Return only information on specified index features +Create an inference endpoint to perform an inference task with the `azureopenai` service. +The list of chat completion models that you can choose from in your Azure OpenAI deployment include: +* [GPT-4 and GPT-4 Turbo models](https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models?tabs=global-standard%2Cstandard-chat-completions#gpt-4-and-gpt-4-turbo-models) +* [GPT-3.5](https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models?tabs=global-standard%2Cstandard-chat-completions#gpt-35) -### get_alias [_get_alias] +The list of embeddings models that you can choose from in your deployment can be found in the [Azure models documentation](https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models?tabs=global-standard%2Cstandard-chat-completions#embeddings). -Get aliases. Retrieves information for one or more data stream or index aliases. +When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. +After creating the endpoint, wait for the model deployment to complete before using it. +To verify the deployment status, use the get trained model statistics API. +Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. +Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources. -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-alias) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-inference-put-azureopenai) ```ts -client.indices.getAlias({ ... }) +client.inference.putAzureopenai({ task_type, azureopenai_inference_id, service, service_settings }) ``` +### Arguments [_arguments_inference.put_azureopenai] -### Arguments [_arguments_189] - -* **Request (object):** - - * **`name` (Optional, string | string[])**: List of aliases to retrieve. Supports wildcards (`*`). To retrieve all aliases, omit this parameter or use `*` or `_all`. - * **`index` (Optional, string | string[])**: List of data streams or indices used to limit the request. Supports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`. - * **`allow_no_indices` (Optional, boolean)**: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. - * **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports a list of values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. - * **`ignore_unavailable` (Optional, boolean)**: If `false`, the request returns an error if it targets a missing or closed index. - * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. 
+#### Request (object) [_request_inference.put_azureopenai] +- **`task_type` (Enum("completion" | "text_embedding"))**: The type of the inference task that the model will perform. +NOTE: The `chat_completion` task type only supports streaming and only through the _stream API. +- **`azureopenai_inference_id` (string)**: The unique identifier of the inference endpoint. +- **`service` (Enum("azureopenai"))**: The type of service supported for the specified task type. In this case, `azureopenai`. +- **`service_settings` ({ api_key, api_version, deployment_id, entra_id, rate_limit, resource_name })**: Settings used to install the inference model. These settings are specific to the `azureopenai` service. +- **`chunking_settings` (Optional, { max_chunk_size, overlap, sentence_overlap, strategy })**: The chunking configuration object. +- **`task_settings` (Optional, { user })**: Settings to configure the inference task. +These settings are specific to the task type you specified. +## client.inference.putCohere [_inference.put_cohere] +Create a Cohere inference endpoint. +Create an inference endpoint to perform an inference task with the `cohere` service. -### get_data_lifecycle [_get_data_lifecycle] +When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. +After creating the endpoint, wait for the model deployment to complete before using it. +To verify the deployment status, use the get trained model statistics API. +Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. +Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources. -Get data stream lifecycles. Retrieves the data stream lifecycle configuration of one or more data streams. - -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-data-lifecycle) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-inference-put-cohere) ```ts -client.indices.getDataLifecycle({ name }) +client.inference.putCohere({ task_type, cohere_inference_id, service, service_settings }) ``` +### Arguments [_arguments_inference.put_cohere] -### Arguments [_arguments_190] +#### Request (object) [_request_inference.put_cohere] +- **`task_type` (Enum("completion" | "rerank" | "text_embedding"))**: The type of the inference task that the model will perform. +- **`cohere_inference_id` (string)**: The unique identifier of the inference endpoint. +- **`service` (Enum("cohere"))**: The type of service supported for the specified task type. In this case, `cohere`. +- **`service_settings` ({ api_key, embedding_type, model_id, rate_limit, similarity })**: Settings used to install the inference model. +These settings are specific to the `cohere` service. +- **`chunking_settings` (Optional, { max_chunk_size, overlap, sentence_overlap, strategy })**: The chunking configuration object. +- **`task_settings` (Optional, { input_type, return_documents, top_n, truncate })**: Settings to configure the inference task. +These settings are specific to the task type you specified. -* **Request (object):** +## client.inference.putElasticsearch [_inference.put_elasticsearch] +Create an Elasticsearch inference endpoint. - * **`name` (string | string[])**: List of data streams to limit the request. Supports wildcards (`*`). 
To target all data streams, omit this parameter or use `*` or `_all`.
- * **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Type of data stream that wildcard patterns can match. Supports a list of values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`.
- * **`include_defaults` (Optional, boolean)**: If `true`, return all default settings in the response.
- * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.
+Create an inference endpoint to perform an inference task with the `elasticsearch` service.
+> info
+> Your Elasticsearch deployment contains preconfigured ELSER and E5 inference endpoints; you only need to create the endpoints using the API if you want to customize the settings.
+If you use the ELSER or the E5 model through the `elasticsearch` service, the API request will automatically download and deploy the model if it isn't downloaded yet.
-### get_data_lifecycle_stats [_get_data_lifecycle_stats]
+> info
+> You might see a 502 bad gateway error in the response when using the Kibana Console. This error usually just reflects a timeout, while the model downloads in the background. You can check the download progress in the Machine Learning UI. If using the Python client, you can set the timeout parameter to a higher value.
-Get data stream lifecycle stats. Get statistics about the data streams that are managed by a data stream lifecycle.
+After creating the endpoint, wait for the model deployment to complete before using it.
+To verify the deployment status, use the get trained model statistics API.
+Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`.
+Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
-[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-data-lifecycle-stats)
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-inference-put-elasticsearch)
```ts
-client.indices.getDataLifecycleStats()
+client.inference.putElasticsearch({ task_type, elasticsearch_inference_id, service, service_settings })
```
+### Arguments [_arguments_inference.put_elasticsearch]
-### get_data_stream [_get_data_stream]
+#### Request (object) [_request_inference.put_elasticsearch]
+- **`task_type` (Enum("rerank" | "sparse_embedding" | "text_embedding"))**: The type of the inference task that the model will perform.
+- **`elasticsearch_inference_id` (string)**: The unique identifier of the inference endpoint.
+It must not match the `model_id`.
+- **`service` (Enum("elasticsearch"))**: The type of service supported for the specified task type. In this case, `elasticsearch`.
+- **`service_settings` ({ adaptive_allocations, deployment_id, model_id, num_allocations, num_threads })**: Settings used to install the inference model. These settings are specific to the `elasticsearch` service.
+- **`chunking_settings` (Optional, { max_chunk_size, overlap, sentence_overlap, strategy })**: The chunking configuration object.
+- **`task_settings` (Optional, { return_documents })**: Settings to configure the inference task.
+These settings are specific to the task type you specified.
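+For illustration, an endpoint backed by the `elasticsearch` service might be created along these lines. This is a sketch, assuming a configured `client` instance; the endpoint name, model ID, and allocation settings are assumptions, not requirements:
+
+```ts
+// Sketch: create a text_embedding inference endpoint using the elasticsearch service.
+const resp = await client.inference.putElasticsearch({
+  task_type: 'text_embedding',
+  elasticsearch_inference_id: 'my-e5-endpoint',  // must not match the model_id
+  service: 'elasticsearch',
+  service_settings: {
+    model_id: '.multilingual-e5-small',          // assumed built-in E5 model ID
+    num_allocations: 1,
+    num_threads: 1
+  }
+})
+console.log(resp.inference_id)
+```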
-Get data streams. Retrieves information about one or more data streams.
+## client.inference.putElser [_inference.put_elser]
+Create an ELSER inference endpoint.
-[Endpoint documentation](docs-content://manage-data/data-store/data-streams.md)
+Create an inference endpoint to perform an inference task with the `elser` service.
+You can also deploy ELSER by using the Elasticsearch inference integration.
-```ts
-client.indices.getDataStream({ ... })
-```
+> info
+> Your Elasticsearch deployment contains a preconfigured ELSER inference endpoint; you only need to create the endpoint using the API if you want to customize the settings.
+The API request will automatically download and deploy the ELSER model if it isn't already downloaded.
-### Arguments [_arguments_191]
+> info
+> You might see a 502 bad gateway error in the response when using the Kibana Console. This error usually just reflects a timeout, while the model downloads in the background. You can check the download progress in the Machine Learning UI. If using the Python client, you can set the timeout parameter to a higher value.
-* **Request (object):**
+After creating the endpoint, wait for the model deployment to complete before using it.
+To verify the deployment status, use the get trained model statistics API.
+Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`.
+Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
- * **`name` (Optional, string | string[])**: List of data stream names used to limit the request. Wildcard (`*`) expressions are supported. If omitted, all data streams are returned.
- * **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Type of data stream that wildcard patterns can match. Supports a list of values, such as `open,hidden`.
- * **`include_defaults` (Optional, boolean)**: If true, returns all relevant default configurations for the index template.
- * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.
- * **`verbose` (Optional, boolean)**: Whether the maximum timestamp for each data stream should be calculated and returned.
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-inference-put-elser)
+
+```ts
+client.inference.putElser({ task_type, elser_inference_id, service, service_settings })
+```
+### Arguments [_arguments_inference.put_elser]
+#### Request (object) [_request_inference.put_elser]
+- **`task_type` (Enum("sparse_embedding"))**: The type of the inference task that the model will perform.
+- **`elser_inference_id` (string)**: The unique identifier of the inference endpoint.
+- **`service` (Enum("elser"))**: The type of service supported for the specified task type. In this case, `elser`.
+- **`service_settings` ({ adaptive_allocations, num_allocations, num_threads })**: Settings used to install the inference model. These settings are specific to the `elser` service.
+- **`chunking_settings` (Optional, { max_chunk_size, overlap, sentence_overlap, strategy })**: The chunking configuration object.
-### get_field_mapping [_get_field_mapping]
+## client.inference.putGoogleaistudio [_inference.put_googleaistudio]
+Create a Google AI Studio inference endpoint.
-Get mapping definitions. Retrieves mapping definitions for one or more fields. For data streams, the API retrieves field mappings for the stream’s backing indices. +Create an inference endpoint to perform an inference task with the `googleaistudio` service. -This API is useful if you don’t need a complete mapping or if an index mapping contains a large number of fields. +When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. +After creating the endpoint, wait for the model deployment to complete before using it. +To verify the deployment status, use the get trained model statistics API. +Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. +Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources. -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-mapping) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-inference-put-googleaistudio) ```ts -client.indices.getFieldMapping({ fields }) +client.inference.putGoogleaistudio({ task_type, googleaistudio_inference_id, service, service_settings }) ``` +### Arguments [_arguments_inference.put_googleaistudio] -### Arguments [_arguments_192] - -* **Request (object):** - - * **`fields` (string | string[])**: List or wildcard expression of fields used to limit returned information. Supports wildcards (`*`). - * **`index` (Optional, string | string[])**: List of data streams, indices, and aliases used to limit the request. Supports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`. - * **`allow_no_indices` (Optional, boolean)**: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. - * **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports a list of values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. - * **`ignore_unavailable` (Optional, boolean)**: If `false`, the request returns an error if it targets a missing or closed index. - * **`include_defaults` (Optional, boolean)**: If `true`, return all default settings in the response. - * **`local` (Optional, boolean)**: If `true`, the request retrieves information from the local node only. +#### Request (object) [_request_inference.put_googleaistudio] +- **`task_type` (Enum("completion" | "text_embedding"))**: The type of the inference task that the model will perform. +- **`googleaistudio_inference_id` (string)**: The unique identifier of the inference endpoint. +- **`service` (Enum("googleaistudio"))**: The type of service supported for the specified task type. In this case, `googleaistudio`. +- **`service_settings` ({ api_key, model_id, rate_limit })**: Settings used to install the inference model. These settings are specific to the `googleaistudio` service. 
+- **`chunking_settings` (Optional, { max_chunk_size, overlap, sentence_overlap, strategy })**: The chunking configuration object. +## client.inference.putGooglevertexai [_inference.put_googlevertexai] +Create a Google Vertex AI inference endpoint. +Create an inference endpoint to perform an inference task with the `googlevertexai` service. -### get_index_template [_get_index_template] +When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. +After creating the endpoint, wait for the model deployment to complete before using it. +To verify the deployment status, use the get trained model statistics API. +Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. +Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources. -Get index templates. Get information about one or more index templates. - -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-index-template) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-inference-put-googlevertexai) ```ts -client.indices.getIndexTemplate({ ... }) +client.inference.putGooglevertexai({ task_type, googlevertexai_inference_id, service, service_settings }) ``` +### Arguments [_arguments_inference.put_googlevertexai] -### Arguments [_arguments_193] +#### Request (object) [_request_inference.put_googlevertexai] +- **`task_type` (Enum("rerank" | "text_embedding"))**: The type of the inference task that the model will perform. +- **`googlevertexai_inference_id` (string)**: The unique identifier of the inference endpoint. +- **`service` (Enum("googlevertexai"))**: The type of service supported for the specified task type. In this case, `googlevertexai`. +- **`service_settings` ({ location, model_id, project_id, rate_limit, service_account_json })**: Settings used to install the inference model. These settings are specific to the `googlevertexai` service. +- **`chunking_settings` (Optional, { max_chunk_size, overlap, sentence_overlap, strategy })**: The chunking configuration object. +- **`task_settings` (Optional, { auto_truncate, top_n })**: Settings to configure the inference task. +These settings are specific to the task type you specified. -* **Request (object):** +## client.inference.putHuggingFace [_inference.put_hugging_face] +Create a Hugging Face inference endpoint. - * **`name` (Optional, string)**: List of index template names used to limit the request. Wildcard (*) expressions are supported. - * **`local` (Optional, boolean)**: If true, the request retrieves information from the local node only. Defaults to false, which means information is retrieved from the master node. - * **`flat_settings` (Optional, boolean)**: If true, returns settings in flat format. - * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. - * **`include_defaults` (Optional, boolean)**: If true, returns all relevant default configurations for the index template. +Create an inference endpoint to perform an inference task with the `hugging_face` service. +You must first create an inference endpoint on the Hugging Face endpoint page to get an endpoint URL. 
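A hedged sketch of the `client.inference.putGooglevertexai` call documented above; the project ID, location, model ID, and service account JSON are placeholders you would replace with your own Google Cloud values.

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'https://localhost:9200/', auth: { apiKey: 'your-api-key' } }) // placeholders

async function run () {
  // Create a text_embedding endpoint backed by Google Vertex AI.
  const response = await client.inference.putGooglevertexai({
    task_type: 'text_embedding',
    googlevertexai_inference_id: 'my-vertexai-embeddings', // hypothetical endpoint ID
    service: 'googlevertexai',
    service_settings: {
      project_id: 'my-gcp-project',   // placeholder
      location: 'us-central1',        // placeholder
      model_id: 'text-embedding-004', // example model; verify availability in your project
      service_account_json: '{"type": "service_account", "...": "..."}' // truncated placeholder
    }
  })
  console.log(response)
}

run().catch(console.log)
```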
+Select the model you want to use on the new endpoint creation page (for example `intfloat/e5-small-v2`), then select the sentence embeddings task under the advanced configuration section. +Create the endpoint and copy the URL after the endpoint initialization has been finished. +The following models are recommended for the Hugging Face service: -### get_mapping [_get_mapping] +* `all-MiniLM-L6-v2` +* `all-MiniLM-L12-v2` +* `all-mpnet-base-v2` +* `e5-base-v2` +* `e5-small-v2` +* `multilingual-e5-base` +* `multilingual-e5-small` -Get mapping definitions. For data streams, the API retrieves mappings for the stream’s backing indices. +When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. +After creating the endpoint, wait for the model deployment to complete before using it. +To verify the deployment status, use the get trained model statistics API. +Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. +Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources. -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-mapping) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-inference-put-hugging-face) ```ts -client.indices.getMapping({ ... }) +client.inference.putHuggingFace({ task_type, huggingface_inference_id, service, service_settings }) ``` +### Arguments [_arguments_inference.put_hugging_face] -### Arguments [_arguments_194] - -* **Request (object):** +#### Request (object) [_request_inference.put_hugging_face] +- **`task_type` (Enum("text_embedding"))**: The type of the inference task that the model will perform. +- **`huggingface_inference_id` (string)**: The unique identifier of the inference endpoint. +- **`service` (Enum("hugging_face"))**: The type of service supported for the specified task type. In this case, `hugging_face`. +- **`service_settings` ({ api_key, rate_limit, url })**: Settings used to install the inference model. These settings are specific to the `hugging_face` service. +- **`chunking_settings` (Optional, { max_chunk_size, overlap, sentence_overlap, strategy })**: The chunking configuration object. - * **`index` (Optional, string | string[])**: List of data streams, indices, and aliases used to limit the request. Supports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`. - * **`allow_no_indices` (Optional, boolean)**: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. - * **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports a list of values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. - * **`ignore_unavailable` (Optional, boolean)**: If `false`, the request returns an error if it targets a missing or closed index. - * **`local` (Optional, boolean)**: If `true`, the request retrieves information from the local node only. 
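To tie the Hugging Face steps above together, a minimal sketch of `client.inference.putHuggingFace`; the endpoint ID is hypothetical, and the API key and URL are placeholders copied from your own Hugging Face Inference Endpoint.

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'https://localhost:9200/', auth: { apiKey: 'your-api-key' } }) // placeholders

async function run () {
  const response = await client.inference.putHuggingFace({
    task_type: 'text_embedding',
    huggingface_inference_id: 'my-hugging-face-embeddings', // hypothetical endpoint ID
    service: 'hugging_face',
    service_settings: {
      api_key: 'hf_...', // placeholder Hugging Face token
      url: 'https://<your-endpoint>.endpoints.huggingface.cloud' // URL copied from the endpoint page
    }
  })
  console.log(response)
}

run().catch(console.log)
```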
- * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. +## client.inference.putJinaai [_inference.put_jinaai] +Create a JinaAI inference endpoint. +Create an inference endpoint to perform an inference task with the `jinaai` service. +To review the available `rerank` models, refer to . +To review the available `text_embedding` models, refer to the . -### get_migrate_reindex_status [_get_migrate_reindex_status] +When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. +After creating the endpoint, wait for the model deployment to complete before using it. +To verify the deployment status, use the get trained model statistics API. +Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. +Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources. -Get the migration reindexing status. - -Get the status of a migration reindex attempt for a data stream or index. +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-inference-put-jinaai) ```ts -client.indices.getMigrateReindexStatus({ index }) +client.inference.putJinaai({ task_type, jinaai_inference_id, service, service_settings }) ``` +### Arguments [_arguments_inference.put_jinaai] -### Arguments [_arguments_195] +#### Request (object) [_request_inference.put_jinaai] +- **`task_type` (Enum("rerank" | "text_embedding"))**: The type of the inference task that the model will perform. +- **`jinaai_inference_id` (string)**: The unique identifier of the inference endpoint. +- **`service` (Enum("jinaai"))**: The type of service supported for the specified task type. In this case, `jinaai`. +- **`service_settings` ({ api_key, model_id, rate_limit, similarity })**: Settings used to install the inference model. These settings are specific to the `jinaai` service. +- **`chunking_settings` (Optional, { max_chunk_size, overlap, sentence_overlap, strategy })**: The chunking configuration object. +- **`task_settings` (Optional, { return_documents, task, top_n })**: Settings to configure the inference task. +These settings are specific to the task type you specified. -* **Request (object):** +## client.inference.putMistral [_inference.put_mistral] +Create a Mistral inference endpoint. - * **`index` (string | string[])**: The index or data stream name. +Create an inference endpoint to perform an inference task with the `mistral` service. +When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. +After creating the endpoint, wait for the model deployment to complete before using it. +To verify the deployment status, use the get trained model statistics API. +Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. +Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources. - -### get_settings [_get_settings_2] - -Get index settings. Get setting information for one or more indices. For data streams, it returns setting information for the stream’s backing indices.
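As a sketch of the `client.inference.putJinaai` request documented above, the following creates a rerank endpoint. The endpoint ID is hypothetical, the API key is a placeholder, and the model ID is an example to check against the JinaAI catalog.

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'https://localhost:9200/', auth: { apiKey: 'your-api-key' } }) // placeholders

async function run () {
  const response = await client.inference.putJinaai({
    task_type: 'rerank',
    jinaai_inference_id: 'my-jinaai-rerank', // hypothetical endpoint ID
    service: 'jinaai',
    service_settings: {
      api_key: 'jina_...', // placeholder JinaAI API key
      model_id: 'jina-reranker-v2-base-multilingual' // example model; verify against the JinaAI docs
    },
    task_settings: {
      return_documents: true,
      top_n: 5
    }
  })
  console.log(response)
}

run().catch(console.log)
```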
- -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-settings) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-inference-put-mistral) ```ts -client.indices.getSettings({ ... }) +client.inference.putMistral({ task_type, mistral_inference_id, service, service_settings }) ``` +### Arguments [_arguments_inference.put_mistral] -### Arguments [_arguments_196] +#### Request (object) [_request_inference.put_mistral] +- **`task_type` (Enum("text_embedding"))**: The task type. +The only valid task type for the model to perform is `text_embedding`. +- **`mistral_inference_id` (string)**: The unique identifier of the inference endpoint. +- **`service` (Enum("mistral"))**: The type of service supported for the specified task type. In this case, `mistral`. +- **`service_settings` ({ api_key, max_input_tokens, model, rate_limit })**: Settings used to install the inference model. These settings are specific to the `mistral` service. +- **`chunking_settings` (Optional, { max_chunk_size, overlap, sentence_overlap, strategy })**: The chunking configuration object. -* **Request (object):** +## client.inference.putOpenai [_inference.put_openai] +Create an OpenAI inference endpoint. - * **`index` (Optional, string | string[])**: List of data streams, indices, and aliases used to limit the request. Supports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`. - * **`name` (Optional, string | string[])**: List or wildcard expression of settings to retrieve. - * **`allow_no_indices` (Optional, boolean)**: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting `foo*,bar*` returns an error if an index starts with foo but no index starts with `bar`. - * **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports a list of values, such as `open,hidden`. - * **`flat_settings` (Optional, boolean)**: If `true`, returns settings in flat format. - * **`ignore_unavailable` (Optional, boolean)**: If `false`, the request returns an error if it targets a missing or closed index. - * **`include_defaults` (Optional, boolean)**: If `true`, return all default settings in the response. - * **`local` (Optional, boolean)**: If `true`, the request retrieves information from the local node only. If `false`, information is retrieved from the master node. - * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. +Create an inference endpoint to perform an inference task with the `openai` service. +When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. +After creating the endpoint, wait for the model deployment to complete before using it. +To verify the deployment status, use the get trained model statistics API. 
+Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. +Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources. +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-inference-put-openai) -### get_template [_get_template] - -Get index templates. Get information about one or more index templates. +```ts +client.inference.putOpenai({ task_type, openai_inference_id, service, service_settings }) +``` -::::{important} -This documentation is about legacy index templates, which are deprecated and will be replaced by the composable templates introduced in Elasticsearch 7.8. -:::: +### Arguments [_arguments_inference.put_openai] +#### Request (object) [_request_inference.put_openai] +- **`task_type` (Enum("chat_completion" | "completion" | "text_embedding"))**: The type of the inference task that the model will perform. +NOTE: The `chat_completion` task type only supports streaming and only through the _stream API. +- **`openai_inference_id` (string)**: The unique identifier of the inference endpoint. +- **`service` (Enum("openai"))**: The type of service supported for the specified task type. In this case, `openai`. +- **`service_settings` ({ api_key, dimensions, model_id, organization_id, rate_limit, url })**: Settings used to install the inference model. These settings are specific to the `openai` service. +- **`chunking_settings` (Optional, { max_chunk_size, overlap, sentence_overlap, strategy })**: The chunking configuration object. +- **`task_settings` (Optional, { user })**: Settings to configure the inference task. +These settings are specific to the task type you specified. -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-template) +## client.inference.putVoyageai [_inference.put_voyageai] +Create a VoyageAI inference endpoint. -```ts -client.indices.getTemplate({ ... }) -``` +Create an inference endpoint to perform an inference task with the `voyageai` service. +Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources. -### Arguments [_arguments_197] +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-inference-put-voyageai) -* **Request (object):** +```ts +client.inference.putVoyageai({ task_type, voyageai_inference_id, service, service_settings }) +``` - * **`name` (Optional, string | string[])**: List of index template names used to limit the request. Wildcard (`*`) expressions are supported. To return all index templates, omit this parameter or use a value of `_all` or `*`. - * **`flat_settings` (Optional, boolean)**: If `true`, returns settings in flat format. - * **`local` (Optional, boolean)**: If `true`, the request retrieves information from the local node only. - * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. +### Arguments [_arguments_inference.put_voyageai] +#### Request (object) [_request_inference.put_voyageai] +- **`task_type` (Enum("text_embedding" | "rerank"))**: The type of the inference task that the model will perform. +- **`voyageai_inference_id` (string)**: The unique identifier of the inference endpoint. 
+- **`service` (Enum("voyageai"))**: The type of service supported for the specified task type. In this case, `voyageai`. +- **`service_settings` ({ dimensions, model_id, rate_limit, embedding_type })**: Settings used to install the inference model. These settings are specific to the `voyageai` service. +- **`chunking_settings` (Optional, { max_chunk_size, overlap, sentence_overlap, strategy })**: The chunking configuration object. +- **`task_settings` (Optional, { input_type, return_documents, top_k, truncation })**: Settings to configure the inference task. +These settings are specific to the task type you specified. +## client.inference.putWatsonx [_inference.put_watsonx] +Create a Watsonx inference endpoint. -### migrate_reindex [_migrate_reindex] +Create an inference endpoint to perform an inference task with the `watsonxai` service. +You need an IBM Cloud Databases for Elasticsearch deployment to use the `watsonxai` inference service. +You can provision one through the IBM catalog, the Cloud Databases CLI plug-in, the Cloud Databases API, or Terraform. -Reindex legacy backing indices. +When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. +After creating the endpoint, wait for the model deployment to complete before using it. +To verify the deployment status, use the get trained model statistics API. +Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. +Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources. -Reindex all legacy backing indices for a data stream. This operation occurs in a persistent task. The persistent task ID is returned immediately and the reindexing work is completed in that task. +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-inference-put-watsonx) ```ts -client.indices.migrateReindex({ ... }) +client.inference.putWatsonx({ task_type, watsonx_inference_id, service, service_settings }) ``` +### Arguments [_arguments_inference.put_watsonx] -### Arguments [_arguments_198] +#### Request (object) [_request_inference.put_watsonx] +- **`task_type` (Enum("text_embedding"))**: The task type. +The only valid task type for the model to perform is `text_embedding`. +- **`watsonx_inference_id` (string)**: The unique identifier of the inference endpoint. +- **`service` (Enum("watsonxai"))**: The type of service supported for the specified task type. In this case, `watsonxai`. +- **`service_settings` ({ api_key, api_version, model_id, project_id, rate_limit, url })**: Settings used to install the inference model. These settings are specific to the `watsonxai` service. -* **Request (object):** +## client.inference.rerank [_inference.rerank] +Perform reranking inference on the service - * **`reindex` (Optional, { mode, source })** +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-inference-inference) + +```ts +client.inference.rerank({ inference_id, query, input }) +``` +### Arguments [_arguments_inference.rerank] +#### Request (object) [_request_inference.rerank] +- **`inference_id` (string)**: The unique identifier for the inference endpoint. +- **`query` (string)**: Query input. +- **`input` (string | string[])**: The text on which you want to perform the inference task. +It can be a single string or an array.
-### migrate_to_data_stream [_migrate_to_data_stream] +> info +> Inference endpoints for the `completion` task type currently only support a single string as input. +- **`task_settings` (Optional, User-defined value)**: Task settings for the individual inference request. +These settings are specific to the task type you specified and override the task settings specified when initializing the service. +- **`timeout` (Optional, string | -1 | 0)**: The amount of time to wait for the inference request to complete. -Convert an index alias to a data stream. Converts an index alias to a data stream. You must have a matching index template that is data stream enabled. The alias must meet the following criteria: The alias must have a write index; All indices for the alias must have a `@timestamp` field mapping of a `date` or `date_nanos` field type; The alias must not have any filters; The alias must not use custom routing. If successful, the request removes the alias and creates a data stream with the same name. The indices for the alias become hidden backing indices for the stream. The write index for the alias becomes the write index for the stream. +## client.inference.sparseEmbedding [_inference.sparse_embedding] +Perform sparse embedding inference on the service -[Endpoint documentation](docs-content://manage-data/data-store/data-streams.md) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-inference-inference) ```ts -client.indices.migrateToDataStream({ name }) +client.inference.sparseEmbedding({ inference_id, input }) ``` +### Arguments [_arguments_inference.sparse_embedding] -### Arguments [_arguments_199] +#### Request (object) [_request_inference.sparse_embedding] +- **`inference_id` (string)**: The inference Id +- **`input` (string | string[])**: Inference input. +Either a string or an array of strings. +- **`task_settings` (Optional, User-defined value)**: Optional task settings +- **`timeout` (Optional, string | -1 | 0)**: Specifies the amount of time to wait for the inference request to complete. -* **Request (object):** +## client.inference.streamCompletion [_inference.stream_completion] +Perform streaming inference. +Get real-time responses for completion tasks by delivering answers incrementally, reducing response times during computation. +This API works only with the completion task type. - * **`name` (string)**: Name of the index alias to convert to a data stream. - * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. - * **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. +IMPORTANT: The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Azure, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face. For built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models. However, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs. +This API requires the `monitor_inference` cluster privilege (the built-in `inference_admin` and `inference_user` roles grant this privilege). 
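A usage sketch for the `client.inference.rerank` call documented above, assuming a rerank-capable endpoint (such as the hypothetical JinaAI endpoint sketched earlier) already exists; the endpoint ID and texts are placeholders.

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'https://localhost:9200/', auth: { apiKey: 'your-api-key' } }) // placeholders

async function run () {
  // Rank candidate passages against a query using an existing rerank endpoint.
  const response = await client.inference.rerank({
    inference_id: 'my-jinaai-rerank', // hypothetical endpoint created earlier
    query: 'How do I reset a lost password?',
    input: [
      'Resetting your password requires access to the recovery email.',
      'Our office is closed on public holidays.',
      'Use the "Forgot password" link on the sign-in page to start a reset.'
    ]
  })
  console.log(response)
}

run().catch(console.log)
```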
You must use a client that supports streaming. +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-inference-stream-inference) -### modify_data_stream [_modify_data_stream] +```ts +client.inference.streamCompletion({ inference_id, input }) +``` -Update data streams. Performs one or more data stream modification actions in a single atomic operation. +### Arguments [_arguments_inference.stream_completion] -[Endpoint documentation](docs-content://manage-data/data-store/data-streams.md) +#### Request (object) [_request_inference.stream_completion] +- **`inference_id` (string)**: The unique identifier for the inference endpoint. +- **`input` (string | string[])**: The text on which you want to perform the inference task. +It can be a single string or an array. -```ts -client.indices.modifyDataStream({ actions }) -``` +NOTE: Inference endpoints for the completion task type currently only support a single string as input. +- **`task_settings` (Optional, User-defined value)**: Optional task settings +## client.inference.textEmbedding [_inference.text_embedding] +Perform text embedding inference on the service -### Arguments [_arguments_200] +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-inference-inference) -* **Request (object):** +```ts +client.inference.textEmbedding({ inference_id, input }) +``` - * **`actions` ({ add_backing_index, remove_backing_index }[])**: Actions to perform. +### Arguments [_arguments_inference.text_embedding] +#### Request (object) [_request_inference.text_embedding] +- **`inference_id` (string)**: The inference Id +- **`input` (string | string[])**: Inference input. +Either a string or an array of strings. +- **`task_settings` (Optional, User-defined value)**: Optional task settings +- **`timeout` (Optional, string | -1 | 0)**: Specifies the amount of time to wait for the inference request to complete. +## client.inference.update [_inference.update] +Update an inference endpoint. -### open [_open] +Modify `task_settings`, secrets (within `service_settings`), or `num_allocations` for an inference endpoint, depending on the specific endpoint service and `task_type`. -Open a closed index. For data streams, the API opens any closed backing indices. +IMPORTANT: The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Azure, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face. +For built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models. +However, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs. -A closed index is blocked for read/write operations and does not allow all operations that opened indices allow. It is not possible to index documents or to search for documents in a closed index. This allows closed indices to not have to maintain internal data structures for indexing or searching documents, resulting in a smaller overhead on the cluster. +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-inference-update) -When opening or closing an index, the master is responsible for restarting the index shards to reflect the new state of the index. The shards will then go through the normal recovery process. 
The data of opened or closed indices is automatically replicated by the cluster to ensure that enough shard copies are safely kept around at all times. +```ts +client.inference.update({ inference_id }) +``` -You can open and close multiple indices. An error is thrown if the request explicitly refers to a missing index. This behavior can be turned off by using the `ignore_unavailable=true` parameter. +### Arguments [_arguments_inference.update] -By default, you must explicitly name the indices you are opening or closing. To open or close indices with `_all`, `*`, or other wildcard expressions, change the `action.destructive_requires_name` setting to `false`. This setting can also be changed with the cluster update settings API. +#### Request (object) [_request_inference.update] +- **`inference_id` (string)**: The unique identifier of the inference endpoint. +- **`task_type` (Optional, Enum("sparse_embedding" | "text_embedding" | "rerank" | "completion" | "chat_completion"))**: The type of inference task that the model performs. +- **`inference_config` (Optional, { chunking_settings, service, service_settings, task_settings })** -Closed indices consume a significant amount of disk-space which can cause problems in managed environments. Closing indices can be turned off with the cluster settings API by setting `cluster.indices.close.enable` to `false`. +## client.ingest.deleteGeoipDatabase [_ingest.delete_geoip_database] +Delete GeoIP database configurations. -Because opening or closing an index allocates its shards, the `wait_for_active_shards` setting on index creation applies to the `_open` and `_close` index actions as well. +Delete one or more IP geolocation database configurations. -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-open) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-ingest-delete-geoip-database) ```ts -client.indices.open({ index }) +client.ingest.deleteGeoipDatabase({ id }) ``` +### Arguments [_arguments_ingest.delete_geoip_database] + +#### Request (object) [_request_ingest.delete_geoip_database] +- **`id` (string | string[])**: A list of geoip database configurations to delete +- **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. +- **`timeout` (Optional, string | -1 | 0)**: The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. + +## client.ingest.deleteIpLocationDatabase [_ingest.delete_ip_location_database] +Delete IP geolocation database configurations. -### Arguments [_arguments_201] +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-ingest-delete-ip-location-database) -* **Request (object):** +```ts +client.ingest.deleteIpLocationDatabase({ id }) +``` - * **`index` (string | string[])**: List of data streams, indices, and aliases used to limit the request. Supports wildcards (`*`). By default, you must explicitly name the indices you using to limit the request. To limit a request using `_all`, `*`, or other wildcard expressions, change the `action.destructive_requires_name` setting to false. You can update this setting in the `elasticsearch.yml` file or using the cluster update settings API. 
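Looking back at the `client.inference.update` API documented above, here is a minimal sketch that raises `num_allocations` on an existing endpoint; the endpoint ID and allocation count are placeholders.

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'https://localhost:9200/', auth: { apiKey: 'your-api-key' } }) // placeholders

async function run () {
  const response = await client.inference.update({
    inference_id: 'my-elser-endpoint', // hypothetical endpoint ID
    inference_config: {
      // Only the fields being changed need to be supplied.
      service_settings: {
        num_allocations: 2
      }
    }
  })
  console.log(response)
}

run().catch(console.log)
```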
- * **`allow_no_indices` (Optional, boolean)**: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. - * **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports a list of values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. - * **`ignore_unavailable` (Optional, boolean)**: If `false`, the request returns an error if it targets a missing or closed index. - * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. - * **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. - * **`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))**: The number of shard copies that must be active before proceeding with the operation. Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). +### Arguments [_arguments_ingest.delete_ip_location_database] +#### Request (object) [_request_ingest.delete_ip_location_database] +- **`id` (string | string[])**: A list of IP location database configurations. +- **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. +A value of `-1` indicates that the request should never time out. +- **`timeout` (Optional, string | -1 | 0)**: The period to wait for a response. +If no response is received before the timeout expires, the request fails and returns an error. +A value of `-1` indicates that the request should never time out. +## client.ingest.deletePipeline [_ingest.delete_pipeline] +Delete pipelines. +Delete one or more ingest pipelines. -### promote_data_stream [_promote_data_stream] +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-ingest-delete-pipeline) -Promote a data stream. Promote a data stream from a replicated data stream managed by cross-cluster replication (CCR) to a regular data stream. +```ts +client.ingest.deletePipeline({ id }) +``` -With CCR auto following, a data stream from a remote cluster can be replicated to the local cluster. These data streams can’t be rolled over in the local cluster. These replicated data streams roll over only if the upstream data stream rolls over. In the event that the remote cluster is no longer available, the data stream in the local cluster can be promoted to a regular data stream, which allows these data streams to be rolled over in the local cluster. +### Arguments [_arguments_ingest.delete_pipeline] -::::{note} -When promoting a data stream, ensure the local cluster has a data stream enabled index template that matches the data stream. If this is missing, the data stream will not be able to roll over until a matching index template is created. 
This will affect the lifecycle management of the data stream and interfere with the data stream size and retention. -:::: +#### Request (object) [_request_ingest.delete_pipeline] +- **`id` (string)**: Pipeline ID or wildcard expression of pipeline IDs used to limit the request. +To delete all ingest pipelines in a cluster, use a value of `*`. +- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. +- **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. +If no response is received before the timeout expires, the request fails and returns an error. +## client.ingest.geoIpStats [_ingest.geo_ip_stats] +Get GeoIP statistics. +Get download statistics for GeoIP2 databases that are used with the GeoIP processor. -[Endpoint documentation](docs-content://manage-data/data-store/data-streams.md) +[Endpoint documentation](https://www.elastic.co/docs/reference/enrich-processor/geoip-processor) ```ts -client.indices.promoteDataStream({ name }) +client.ingest.geoIpStats() ``` -### Arguments [_arguments_202] +## client.ingest.getGeoipDatabase [_ingest.get_geoip_database] +Get GeoIP database configurations. -* **Request (object):** +Get information about one or more IP geolocation database configurations. - * **`name` (string)**: The name of the data stream - * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-ingest-get-geoip-database) +```ts +client.ingest.getGeoipDatabase({ ... }) +``` +### Arguments [_arguments_ingest.get_geoip_database] -### put_alias [_put_alias] +#### Request (object) [_request_ingest.get_geoip_database] +- **`id` (Optional, string | string[])**: A list of database configuration IDs to retrieve. +Wildcard (`*`) expressions are supported. +To get all database configurations, omit this parameter or use `*`. -Create or update an alias. Adds a data stream or index to an alias. +## client.ingest.getIpLocationDatabase [_ingest.get_ip_location_database] +Get IP geolocation database configurations. -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-update-aliases) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-ingest-get-ip-location-database) ```ts -client.indices.putAlias({ index, name }) +client.ingest.getIpLocationDatabase({ ... }) ``` +### Arguments [_arguments_ingest.get_ip_location_database] + +#### Request (object) [_request_ingest.get_ip_location_database] +- **`id` (Optional, string | string[])**: List of database configuration IDs to retrieve. +Wildcard (`*`) expressions are supported. +To get all database configurations, omit this parameter or use `*`. +- **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. +A value of `-1` indicates that the request should never time out. -### Arguments [_arguments_203] +## client.ingest.getPipeline [_ingest.get_pipeline] +Get pipelines. -* **Request (object):** +Get information about one or more ingest pipelines. +This API returns a local reference of the pipeline. 
- * **`index` (string | string[])**: List of data streams or indices to add. Supports wildcards (`*`). Wildcard patterns that match both data streams and indices return an error. - * **`name` (string)**: Alias to update. If the alias doesn’t exist, the request creates it. Index alias names support date math. - * **`filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })**: Query used to limit documents the alias can access. - * **`index_routing` (Optional, string)**: Value used to route indexing operations to a specific shard. If specified, this overwrites the `routing` value for indexing operations. Data stream aliases don’t support this parameter. - * **`is_write_index` (Optional, boolean)**: If `true`, sets the write index or data stream for the alias. If an alias points to multiple indices or data streams and `is_write_index` isn’t set, the alias rejects write requests. If an index alias points to one index and `is_write_index` isn’t set, the index automatically acts as the write index. Data stream aliases don’t automatically set a write data stream, even if the alias points to one data stream. - * **`routing` (Optional, string)**: Value used to route indexing and search operations to a specific shard. Data stream aliases don’t support this parameter. - * **`search_routing` (Optional, string)**: Value used to route search operations to a specific shard. If specified, this overwrites the `routing` value for search operations. Data stream aliases don’t support this parameter. - * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. - * **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-ingest-get-pipeline) +```ts +client.ingest.getPipeline({ ... }) +``` +### Arguments [_arguments_ingest.get_pipeline] -### put_data_lifecycle [_put_data_lifecycle] +#### Request (object) [_request_ingest.get_pipeline] +- **`id` (Optional, string)**: List of pipeline IDs to retrieve. +Wildcard (`*`) expressions are supported. +To get all ingest pipelines, omit this parameter or use `*`. +- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. +- **`summary` (Optional, boolean)**: Return pipelines without their definitions (default: false) -Update data stream lifecycles. Update the data stream lifecycle of the specified data streams. +## client.ingest.processorGrok [_ingest.processor_grok] +Run a grok processor. 
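A short sketch of the `client.ingest.getPipeline` call documented above: fetch summaries of all pipelines, then fetch a single pipeline by ID. The pipeline ID is a placeholder.

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'https://localhost:9200/', auth: { apiKey: 'your-api-key' } }) // placeholders

async function run () {
  // List every pipeline without its processor definitions.
  const summaries = await client.ingest.getPipeline({ summary: true })
  console.log(Object.keys(summaries))

  // Fetch one pipeline in full; wildcards in the ID are also accepted.
  const pipeline = await client.ingest.getPipeline({ id: 'my-pipeline' }) // hypothetical ID
  console.log(pipeline)
}

run().catch(console.log)
```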
+Extract structured fields out of a single text field within a document. +You must choose which field to extract matched fields from, as well as the grok pattern you expect will match. +A grok pattern is like a regular expression that supports aliased expressions that can be reused. -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-put-data-lifecycle) +[Endpoint documentation](https://www.elastic.co/docs/reference/enrich-processor/grok-processor) ```ts -client.indices.putDataLifecycle({ name }) +client.ingest.processorGrok() ``` -### Arguments [_arguments_204] - -* **Request (object):** +## client.ingest.putGeoipDatabase [_ingest.put_geoip_database] +Create or update a GeoIP database configuration. - * **`name` (string | string[])**: List of data streams used to limit the request. Supports wildcards (`*`). To target all data streams use `*` or `_all`. - * **`lifecycle` (Optional, { data_retention, downsampling, enabled })** - * **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Type of data stream that wildcard patterns can match. Supports a list of values, such as `open,hidden`. Valid values are: `all`, `hidden`, `open`, `closed`, `none`. - * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. - * **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. +Refer to the create or update IP geolocation database configuration API. +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-ingest-put-geoip-database) +```ts +client.ingest.putGeoipDatabase({ id, name, maxmind }) +``` -### put_index_template [_put_index_template] - -Create or update an index template. Index templates define settings, mappings, and aliases that can be applied automatically to new indices. +### Arguments [_arguments_ingest.put_geoip_database] -Elasticsearch applies templates to new indices based on an wildcard pattern that matches the index name. Index templates are applied during data stream or index creation. For data streams, these settings and mappings are applied when the stream’s backing indices are created. Settings and mappings specified in a create index API request override any settings or mappings specified in an index template. Changes to index templates do not affect existing indices, including the existing backing indices of a data stream. +#### Request (object) [_request_ingest.put_geoip_database] +- **`id` (string)**: ID of the database configuration to create or update. +- **`name` (string)**: The provider-assigned name of the IP geolocation database to download. +- **`maxmind` ({ account_id })**: The configuration necessary to identify which IP geolocation provider to use to download the database, as well as any provider-specific configuration necessary for such downloading. +At present, the only supported provider is maxmind, and the maxmind provider requires that an account_id (string) is configured. +- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. 
+- **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. -You can use C-style `/* *\/` block comments in index templates. You can include comments anywhere in the request body, except before the opening curly bracket. +## client.ingest.putIpLocationDatabase [_ingest.put_ip_location_database] +Create or update an IP geolocation database configuration. -**Multiple matching templates** +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-ingest-put-ip-location-database) -If multiple index templates match the name of a new index or data stream, the template with the highest priority is used. +```ts +client.ingest.putIpLocationDatabase({ id }) +``` -Multiple templates with overlapping index patterns at the same priority are not allowed and an error will be thrown when attempting to create a template matching an existing index template at identical priorities. +### Arguments [_arguments_ingest.put_ip_location_database] -**Composing aliases, mappings, and settings** +#### Request (object) [_request_ingest.put_ip_location_database] +- **`id` (string)**: The database configuration identifier. +- **`configuration` (Optional, { name, maxmind, ipinfo })** +- **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. +A value of `-1` indicates that the request should never time out. +- **`timeout` (Optional, string | -1 | 0)**: The period to wait for a response from all relevant nodes in the cluster after updating the cluster metadata. +If no response is received before the timeout expires, the cluster metadata update still applies but the response indicates that it was not completely acknowledged. +A value of `-1` indicates that the request should never time out. -When multiple component templates are specified in the `composed_of` field for an index template, they are merged in the order specified, meaning that later component templates override earlier component templates. Any mappings, settings, or aliases from the parent index template are merged in next. Finally, any configuration on the index request itself is merged. Mapping definitions are merged recursively, which means that later mapping components can introduce new field mappings and update the mapping configuration. If a field mapping is already contained in an earlier component, its definition will be completely overwritten by the later one. This recursive merging strategy applies not only to field mappings, but also root options like `dynamic_templates` and `meta`. If an earlier component contains a `dynamic_templates` block, then by default new `dynamic_templates` entries are appended onto the end. If an entry already exists with the same key, then it is overwritten by the new definition. +## client.ingest.putPipeline [_ingest.put_pipeline] +Create or update a pipeline. +Changes made using this API take effect immediately. 
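To illustrate the `client.ingest.putGeoipDatabase` call documented above, a sketch that registers a Maxmind database download; the configuration ID, database name, and account ID are placeholders to replace with your own values.

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'https://localhost:9200/', auth: { apiKey: 'your-api-key' } }) // placeholders

async function run () {
  const response = await client.ingest.putGeoipDatabase({
    id: 'my-city-db-config', // hypothetical configuration ID
    name: 'GeoIP2-City',     // provider-assigned database name, as described above
    maxmind: {
      account_id: '1234567'  // placeholder Maxmind account ID
    }
  })
  console.log(response)
}

run().catch(console.log)
```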
-[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-put-index-template) +[Endpoint documentation](https://www.elastic.co/docs/manage-data/ingest/transform-enrich/ingest-pipelines) ```ts -client.indices.putIndexTemplate({ name }) +client.ingest.putPipeline({ id }) ``` +### Arguments [_arguments_ingest.put_pipeline] -### Arguments [_arguments_205] +#### Request (object) [_request_ingest.put_pipeline] +- **`id` (string)**: ID of the ingest pipeline to create or update. +- **`_meta` (Optional, Record)**: Optional metadata about the ingest pipeline. May have any contents. This map is not automatically generated by Elasticsearch. +- **`description` (Optional, string)**: Description of the ingest pipeline. +- **`on_failure` (Optional, { append, attachment, bytes, circle, community_id, convert, csv, date, date_index_name, dissect, dot_expander, drop, enrich, fail, fingerprint, foreach, ip_location, geo_grid, geoip, grok, gsub, html_strip, inference, join, json, kv, lowercase, network_direction, pipeline, redact, registered_domain, remove, rename, reroute, script, set, set_security_user, sort, split, terminate, trim, uppercase, urldecode, uri_parts, user_agent }[])**: Processors to run immediately after a processor failure. Each processor supports a processor-level `on_failure` value. If a processor without an `on_failure` value fails, Elasticsearch uses this pipeline-level parameter as a fallback. The processors in this parameter run sequentially in the order specified. Elasticsearch will not attempt to run the pipeline's remaining processors. +- **`processors` (Optional, { append, attachment, bytes, circle, community_id, convert, csv, date, date_index_name, dissect, dot_expander, drop, enrich, fail, fingerprint, foreach, ip_location, geo_grid, geoip, grok, gsub, html_strip, inference, join, json, kv, lowercase, network_direction, pipeline, redact, registered_domain, remove, rename, reroute, script, set, set_security_user, sort, split, terminate, trim, uppercase, urldecode, uri_parts, user_agent }[])**: Processors used to perform transformations on documents before indexing. Processors run sequentially in the order specified. +- **`version` (Optional, number)**: Version number used by external systems to track ingest pipelines. This parameter is intended for external systems only. Elasticsearch does not use or validate pipeline version numbers. +- **`deprecated` (Optional, boolean)**: Marks this ingest pipeline as deprecated. +When a deprecated ingest pipeline is referenced as the default or final pipeline when creating or updating a non-deprecated index template, Elasticsearch will emit a deprecation warning. +- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. +- **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. +- **`if_version` (Optional, number)**: Required version for optimistic concurrency control for pipeline updates -* **Request (object):** +## client.ingest.simulate [_ingest.simulate] +Simulate a pipeline. - * **`name` (string)**: Index or template name - * **`index_patterns` (Optional, string | string[])**: Name of the index template to create. - * **`composed_of` (Optional, string[])**: An ordered list of component template names. 
Component templates are merged in the order specified, meaning that the last component template specified has the highest precedence. - * **`template` (Optional, { aliases, mappings, settings, lifecycle })**: Template to be applied. It may optionally include an `aliases`, `mappings`, or `settings` configuration. - * **`data_stream` (Optional, { hidden, allow_custom_routing })**: If this object is included, the template is used to create data streams and their backing indices. Supports an empty object. Data streams require a matching index template with a `data_stream` object. - * **`priority` (Optional, number)**: Priority to determine index template precedence when a new data stream or index is created. The index template with the highest priority is chosen. If no priority is specified the template is treated as though it is of priority 0 (lowest priority). This number is not automatically generated by Elasticsearch. - * **`version` (Optional, number)**: Version number used to manage index templates externally. This number is not automatically generated by Elasticsearch. External systems can use these version numbers to simplify template management. To unset a version, replace the template without specifying one. - * **`_meta` (Optional, Record)**: Optional user metadata about the index template. It may have any contents. It is not automatically generated or used by Elasticsearch. This user-defined object is stored in the cluster state, so keeping it short is preferable To unset the metadata, replace the template without specifying it. - * **`allow_auto_create` (Optional, boolean)**: This setting overrides the value of the `action.auto_create_index` cluster setting. If set to `true` in a template, then indices can be automatically created using that template even if auto-creation of indices is disabled via `actions.auto_create_index`. If set to `false`, then indices or data streams matching the template must always be explicitly created, and may never be automatically created. - * **`ignore_missing_component_templates` (Optional, string[])**: The configuration option ignore_missing_component_templates can be used when an index template references a component template that might not exist - * **`deprecated` (Optional, boolean)**: Marks this index template as deprecated. When creating or updating a non-deprecated index template that uses deprecated components, Elasticsearch will emit a deprecation warning. - * **`create` (Optional, boolean)**: If `true`, this request cannot replace or update existing index templates. - * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. - * **`cause` (Optional, string)**: User defined reason for creating/updating the index template +Run an ingest pipeline against a set of provided documents. +You can either specify an existing pipeline to use with the provided documents or supply a pipeline definition in the body of the request. +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-ingest-simulate) +```ts +client.ingest.simulate({ docs }) +``` -### put_mapping [_put_mapping] +### Arguments [_arguments_ingest.simulate] -Update field mappings. Add new fields to an existing data stream or index. You can also use this API to change the search settings of existing fields and add new properties to existing object fields. 
For data streams, these changes are applied to all backing indices by default. +#### Request (object) [_request_ingest.simulate] +- **`docs` ({ _id, _index, _source }[])**: Sample documents to test in the pipeline. +- **`id` (Optional, string)**: The pipeline to test. +If you don't specify a `pipeline` in the request body, this parameter is required. +- **`pipeline` (Optional, { description, on_failure, processors, version, deprecated, _meta })**: The pipeline to test. +If you don't specify the `pipeline` request path parameter, this parameter is required. +If you specify both this and the request path parameter, the API only uses the request path parameter. +- **`verbose` (Optional, boolean)**: If `true`, the response includes output data for each processor in the executed pipeline. -**Add multi-fields to an existing field** +## client.license.delete [_license.delete] +Delete the license. -Multi-fields let you index the same field in different ways. You can use this API to update the fields mapping parameter and enable multi-fields for an existing field. WARNING: If an index (or data stream) contains documents when you add a multi-field, those documents will not have values for the new multi-field. You can populate the new multi-field with the update by query API. +When the license expires, your subscription level reverts to Basic. -**Change supported mapping parameters for an existing field** +If the operator privileges feature is enabled, only operator users can use this API. -The documentation for each mapping parameter indicates whether you can update it for an existing field using this API. For example, you can use the update mapping API to update the `ignore_above` parameter. +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-license-delete) -**Change the mapping of an existing field** +```ts +client.license.delete({ ... }) +``` -Except for supported mapping parameters, you can’t change the mapping or field type of an existing field. Changing an existing field could invalidate data that’s already indexed. +### Arguments [_arguments_license.delete] -If you need to change the mapping of a field in a data stream’s backing indices, refer to documentation about modifying data streams. If you need to change the mapping of a field in other indices, create a new index with the correct mapping and reindex your data into that index. +#### Request (object) [_request_license.delete] +- **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for a connection to the master node. +- **`timeout` (Optional, string | -1 | 0)**: The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. -**Rename a field** +## client.license.get [_license.get] +Get license information. + +Get information about your Elastic license including its type, its status, when it was issued, and when it expires. -Renaming a field would invalidate data already indexed under the old field name. Instead, add an alias field to create an alternate field name. +>info +> If the master node is generating a new cluster state, the get license API may return a `404 Not Found` response. +> If you receive an unexpected 404 response after cluster startup, wait a short period and retry the request. 
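Putting the `client.ingest.putPipeline` and `client.ingest.simulate` sections above together, a sketch that creates a small pipeline and dry-runs it against a sample document; the pipeline ID, field names, and sample document are placeholders.

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'https://localhost:9200/', auth: { apiKey: 'your-api-key' } }) // placeholders

async function run () {
  // Create (or replace) a pipeline with two processors.
  await client.ingest.putPipeline({
    id: 'my-normalize-pipeline', // hypothetical pipeline ID
    description: 'Lowercase the level field and stamp ingest time',
    processors: [
      { lowercase: { field: 'level' } },
      { set: { field: 'ingested_at', value: '{{_ingest.timestamp}}' } }
    ]
  })

  // Dry-run the pipeline against a sample document without indexing anything.
  const simulation = await client.ingest.simulate({
    id: 'my-normalize-pipeline',
    verbose: true,
    docs: [
      { _index: 'logs-test', _id: '1', _source: { level: 'WARN', message: 'disk usage high' } }
    ]
  })
  console.log(JSON.stringify(simulation, null, 2))
}

run().catch(console.log)
```

With `verbose: true`, the response shows the document after each processor, which makes it easy to see where a pipeline change alters the output.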
-[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-put-mapping) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-license-get) ```ts -client.indices.putMapping({ index }) +client.license.get({ ... }) ``` +### Arguments [_arguments_license.get] -### Arguments [_arguments_206] +#### Request (object) [_request_license.get] +- **`accept_enterprise` (Optional, boolean)**: If `true`, this parameter returns enterprise for Enterprise license types. If `false`, this parameter returns platinum for both platinum and enterprise license types. This behavior is maintained for backwards compatibility. +This parameter is deprecated and will always be set to true in 8.x. +- **`local` (Optional, boolean)**: Specifies whether to retrieve local information. The default value is `false`, which means the information is retrieved from the master node. -* **Request (object):** +## client.license.getBasicStatus [_license.get_basic_status] +Get the basic license status. - * **`index` (string | string[])**: A list of index names the mapping should be added to (supports wildcards); use `_all` or omit to add the mapping on all indices. - * **`date_detection` (Optional, boolean)**: Controls whether dynamic date detection is enabled. - * **`dynamic` (Optional, Enum("strict" | "runtime" | true | false))**: Controls whether new fields are added dynamically. - * **`dynamic_date_formats` (Optional, string[])**: If date detection is enabled then new string fields are checked against *dynamic_date_formats* and if the value matches then a new date field is added instead of string. - * **`dynamic_templates` (Optional, Record | Record[])**: Specify dynamic templates for the mapping. - * **`_field_names` (Optional, { enabled })**: Control whether field names are enabled for the index. - * **`_meta` (Optional, Record)**: A mapping type can have custom meta data associated with it. These are not used at all by Elasticsearch, but can be used to store application-specific metadata. - * **`numeric_detection` (Optional, boolean)**: Automatically map strings into numeric data types for all fields. - * **`properties` (Optional, Record)**: Mapping for a field. For new fields, this mapping can include: +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-license-get-basic-status) - * Field name - * Field data type - * Mapping parameters +```ts +client.license.getBasicStatus() +``` - * **`_routing` (Optional, { required })**: Enable making a routing value required on indexed documents. - * **`_source` (Optional, { compress, compress_threshold, enabled, excludes, includes, mode })**: Control whether the _source field is enabled on the index. - * **`runtime` (Optional, Record)**: Mapping of runtime fields for the index. - * **`allow_no_indices` (Optional, boolean)**: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. - * **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports a list of values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. 
- * **`ignore_unavailable` (Optional, boolean)**: If `false`, the request returns an error if it targets a missing or closed index. - * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. - * **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. - * **`write_index_only` (Optional, boolean)**: If `true`, the mappings are applied only to the current write index for the target. +## client.license.getTrialStatus [_license.get_trial_status] +Get the trial status. +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-license-get-trial-status) -### put_settings [_put_settings_2] +```ts +client.license.getTrialStatus() +``` -Update index settings. Changes dynamic index settings in real time. For data streams, index setting changes are applied to all backing indices by default. -To revert a setting to the default value, use a null value. The list of per-index settings that can be updated dynamically on live indices can be found in index module documentation. To preserve existing settings from being updated, set the `preserve_existing` parameter to `true`. +## client.license.post [_license.post] +Update the license. -::::{note} -You can only define new analyzers on closed indices. To add an analyzer, you must close the index, define the analyzer, and reopen the index. You cannot close the write index of a data stream. To update the analyzer for a data stream’s write index and future backing indices, update the analyzer in the index template used by the stream. Then roll over the data stream to apply the new analyzer to the stream’s write index and future backing indices. This affects searches and any new data added to the stream after the rollover. However, it does not affect the data stream’s backing indices or their existing data. To change the analyzer for existing backing indices, you must create a new data stream and reindex your data into it. -:::: +You can update your license at runtime without shutting down your nodes. +License updates take effect immediately. +If the license you are installing does not support all of the features that were available with your previous license, however, you are notified in the response. +You must then re-submit the API request with the acknowledge parameter set to true. +NOTE: If Elasticsearch security features are enabled and you are installing a gold or higher license, you must enable TLS on the transport networking layer before you install the license. +If the operator privileges feature is enabled, only operator users can use this API. -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-put-settings) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-license-post) ```ts -client.indices.putSettings({ ... }) +client.license.post({ ... 
}) ``` +### Arguments [_arguments_license.post] -### Arguments [_arguments_207] - -* **Request (object):** +#### Request (object) [_request_license.post] +- **`license` (Optional, { expiry_date_in_millis, issue_date_in_millis, start_date_in_millis, issued_to, issuer, max_nodes, max_resource_units, signature, type, uid })** +- **`licenses` (Optional, { expiry_date_in_millis, issue_date_in_millis, start_date_in_millis, issued_to, issuer, max_nodes, max_resource_units, signature, type, uid }[])**: A sequence of one or more JSON documents containing the license information. +- **`acknowledge` (Optional, boolean)**: Specifies whether you acknowledge the license changes. +- **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for a connection to the master node. +- **`timeout` (Optional, string | -1 | 0)**: The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. - * **`index` (Optional, string | string[])**: List of data streams, indices, and aliases used to limit the request. Supports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`. - * **`settings` (Optional, { index, mode, routing_path, soft_deletes, sort, number_of_shards, number_of_replicas, number_of_routing_shards, check_on_startup, codec, routing_partition_size, load_fixed_bitset_filters_eagerly, hidden, auto_expand_replicas, merge, search, refresh_interval, max_result_window, max_inner_result_window, max_rescore_window, max_docvalue_fields_search, max_script_fields, max_ngram_diff, max_shingle_diff, blocks, max_refresh_listeners, analyze, highlight, max_terms_count, max_regex_length, routing, gc_deletes, default_pipeline, final_pipeline, lifecycle, provided_name, creation_date, creation_date_string, uuid, version, verified_before_close, format, max_slices_per_scroll, translog, query_string, priority, top_metrics_max_size, analysis, settings, time_series, queries, similarity, mapping, indexing.slowlog, indexing_pressure, store })** - * **`allow_no_indices` (Optional, boolean)**: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. - * **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports a list of values, such as `open,hidden`. - * **`flat_settings` (Optional, boolean)**: If `true`, returns settings in flat format. - * **`ignore_unavailable` (Optional, boolean)**: If `true`, returns settings in flat format. - * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. - * **`preserve_existing` (Optional, boolean)**: If `true`, existing index settings remain unchanged. - * **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. +## client.license.postStartBasic [_license.post_start_basic] +Start a basic license. 
+Start an indefinite basic license, which gives access to all the basic features. +NOTE: In order to start a basic license, you must not currently have a basic license. -### put_template [_put_template] +If the basic license does not support all of the features that are available with your current license, however, you are notified in the response. +You must then re-submit the API request with the `acknowledge` parameter set to `true`. -Create or update an index template. Index templates define settings, mappings, and aliases that can be applied automatically to new indices. Elasticsearch applies templates to new indices based on an index pattern that matches the index name. +To check the status of your basic license, use the get basic license API. -::::{important} -This documentation is about legacy index templates, which are deprecated and will be replaced by the composable templates introduced in Elasticsearch 7.8. -:::: +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-license-post-start-basic) +```ts +client.license.postStartBasic({ ... }) +``` -Composable templates always take precedence over legacy templates. If no composable template matches a new index, matching legacy templates are applied according to their order. +### Arguments [_arguments_license.post_start_basic] -Index templates are only applied during index creation. Changes to index templates do not affect existing indices. Settings and mappings specified in create index API requests override any settings or mappings specified in an index template. +#### Request (object) [_request_license.post_start_basic] +- **`acknowledge` (Optional, boolean)**: whether the user has acknowledged acknowledge messages (default: false) +- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. +- **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. -You can use C-style `/* *\/` block comments in index templates. You can include comments anywhere in the request body, except before the opening curly bracket. +## client.license.postStartTrial [_license.post_start_trial] +Start a trial. +Start a 30-day trial, which gives access to all subscription features. -**Indices matching multiple templates** +NOTE: You are allowed to start a trial only if your cluster has not already activated a trial for the current major product version. +For example, if you have already activated a trial for v8.0, you cannot start a new trial until v9.0. You can, however, request an extended trial at https://www.elastic.co/trialextension. -Multiple index templates can potentially match an index, in this case, both the settings and mappings are merged into the final configuration of the index. The order of the merging can be controlled using the order parameter, with lower order being applied first, and higher orders overriding them. NOTE: Multiple matching templates with the same order value will result in a non-deterministic merging order. +To check the status of your trial, use the get trial status API. -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-put-template) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-license-post-start-trial) ```ts -client.indices.putTemplate({ name }) +client.license.postStartTrial({ ... 
}) ``` +### Arguments [_arguments_license.post_start_trial] -### Arguments [_arguments_208] +#### Request (object) [_request_license.post_start_trial] +- **`acknowledge` (Optional, boolean)**: whether the user has acknowledged acknowledge messages (default: false) +- **`type_query_string` (Optional, string)** +- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. -* **Request (object):** +## client.logstash.deletePipeline [_logstash.delete_pipeline] +Delete a Logstash pipeline. +Delete a pipeline that is used for Logstash Central Management. +If the request succeeds, you receive an empty response with an appropriate status code. - * **`name` (string)**: The name of the template - * **`aliases` (Optional, Record)**: Aliases for the index. - * **`index_patterns` (Optional, string | string[])**: Array of wildcard expressions used to match the names of indices during creation. - * **`mappings` (Optional, { all_field, date_detection, dynamic, dynamic_date_formats, dynamic_templates, _field_names, index_field, _meta, numeric_detection, properties, _routing, _size, _source, runtime, enabled, subobjects, _data_stream_timestamp })**: Mapping for fields in the index. - * **`order` (Optional, number)**: Order in which Elasticsearch applies this template if index matches multiple templates. +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-logstash-delete-pipeline) +```ts +client.logstash.deletePipeline({ id }) +``` -Templates with lower *order* values are merged first. Templates with higher *order* values are merged later, overriding templates with lower values. ** *`settings` (Optional, { index, mode, routing_path, soft_deletes, sort, number_of_shards, number_of_replicas, number_of_routing_shards, check_on_startup, codec, routing_partition_size, load_fixed_bitset_filters_eagerly, hidden, auto_expand_replicas, merge, search, refresh_interval, max_result_window, max_inner_result_window, max_rescore_window, max_docvalue_fields_search, max_script_fields, max_ngram_diff, max_shingle_diff, blocks, max_refresh_listeners, analyze, highlight, max_terms_count, max_regex_length, routing, gc_deletes, default_pipeline, final_pipeline, lifecycle, provided_name, creation_date, creation_date_string, uuid, version, verified_before_close, format, max_slices_per_scroll, translog, query_string, priority, top_metrics_max_size, analysis, settings, time_series, queries, similarity, mapping, indexing.slowlog, indexing_pressure, store })**: Configuration options for the index. *** *`version` (Optional, number)**: Version number used to manage index templates externally. This number is not automatically generated by Elasticsearch. To unset a version, replace the template without specifying one. *** *`create` (Optional, boolean)**: If true, this request cannot replace or update existing index templates. *** *`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. ** *`cause` (Optional, string)** - +### Arguments [_arguments_logstash.delete_pipeline] -### recovery [_recovery_2] +#### Request (object) [_request_logstash.delete_pipeline] +- **`id` (string)**: An identifier for the pipeline. -Get index recovery information. Get information about ongoing and completed shard recoveries for one or more indices. For data streams, the API returns information for the stream’s backing indices. 
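+For example, a centrally managed pipeline can be removed by its identifier; the pipeline name below is a hypothetical placeholder:
+```ts
+// Delete one pipeline from Logstash Central Management; success resolves with an empty body.
+await client.logstash.deletePipeline({ id: 'my-logstash-pipeline' })
+```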
+## client.logstash.getPipeline [_logstash.get_pipeline] +Get Logstash pipelines. +Get pipelines that are used for Logstash Central Management. -All recoveries, whether ongoing or complete, are kept in the cluster state and may be reported on at any time. +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-logstash-get-pipeline) -Shard recovery is the process of initializing a shard copy, such as restoring a primary shard from a snapshot or creating a replica shard from a primary shard. When a shard recovery completes, the recovered shard is available for search and indexing. +```ts +client.logstash.getPipeline({ ... }) +``` -Recovery automatically occurs during the following processes: +### Arguments [_arguments_logstash.get_pipeline] -* When creating an index for the first time. -* When a node rejoins the cluster and starts up any missing primary shard copies using the data that it holds in its data path. -* Creation of new replica shard copies from the primary. -* Relocation of a shard copy to a different node in the same cluster. -* A snapshot restore operation. -* A clone, shrink, or split operation. +#### Request (object) [_request_logstash.get_pipeline] +- **`id` (Optional, string | string[])**: A list of pipeline identifiers. -You can determine the cause of a shard recovery using the recovery or cat recovery APIs. +## client.logstash.putPipeline [_logstash.put_pipeline] +Create or update a Logstash pipeline. -The index recovery API reports information about completed recoveries only for shard copies that currently exist in the cluster. It only reports the last recovery for each shard copy and does not report historical information about earlier recoveries, nor does it report information about the recoveries of shard copies that no longer exist. This means that if a shard copy completes a recovery and then Elasticsearch relocates it onto a different node then the information about the original recovery will not be shown in the recovery API. +Create a pipeline that is used for Logstash Central Management. +If the specified pipeline exists, it is replaced. -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-recovery) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-logstash-put-pipeline) ```ts -client.indices.recovery({ ... }) +client.logstash.putPipeline({ id }) ``` +### Arguments [_arguments_logstash.put_pipeline] -### Arguments [_arguments_209] - -* **Request (object):** +#### Request (object) [_request_logstash.put_pipeline] +- **`id` (string)**: An identifier for the pipeline. +- **`pipeline` (Optional, { description, on_failure, processors, version, deprecated, _meta })** - * **`index` (Optional, string | string[])**: List of data streams, indices, and aliases used to limit the request. Supports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`. - * **`active_only` (Optional, boolean)**: If `true`, the response only includes ongoing shard recoveries. - * **`detailed` (Optional, boolean)**: If `true`, the response includes detailed information about shard recoveries. +## client.migration.deprecations [_migration.deprecations] +Get deprecation information. +Get information about different cluster, node, and index level settings that use deprecated features that will be removed or changed in the next major version. +TIP: This API is designed for indirect use by the Upgrade Assistant. 
+You are strongly recommended to use the Upgrade Assistant. +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-migration-deprecations) -### refresh [_refresh] - -Refresh an index. A refresh makes recent operations performed on one or more indices available for search. For data streams, the API runs the refresh operation on the stream’s backing indices. +```ts +client.migration.deprecations({ ... }) ``` -By default, Elasticsearch periodically refreshes indices every second, but only on indices that have received one search request or more in the last 30 seconds. You can change this default interval with the `index.refresh_interval` setting. +### Arguments [_arguments_migration.deprecations] -Refresh requests are synchronous and do not return a response until the refresh operation completes. +#### Request (object) [_request_migration.deprecations] +- **`index` (Optional, string)**: Comma-separated list of data streams or indices to check. Wildcard (*) expressions are supported. -Refreshes are resource-intensive. To ensure good cluster performance, it’s recommended to wait for Elasticsearch’s periodic refresh rather than performing an explicit refresh when possible. +## client.migration.getFeatureUpgradeStatus [_migration.get_feature_upgrade_status] +Get feature migration information. +Version upgrades sometimes require changes to how features store configuration information and data in system indices. +Check which features need to be migrated and the status of any migrations that are in progress. -If your application workflow indexes documents and then runs a search to retrieve the indexed document, it’s recommended to use the index API’s `refresh=wait_for` query parameter option. This option ensures the indexing operation waits for a periodic refresh before running the search. +TIP: This API is designed for indirect use by the Upgrade Assistant. +You are strongly recommended to use the Upgrade Assistant. -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-refresh) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-migration-get-feature-upgrade-status) ```ts -client.indices.refresh({ ... }) +client.migration.getFeatureUpgradeStatus() ``` -### Arguments [_arguments_210] +## client.migration.postFeatureUpgrade [_migration.post_feature_upgrade] +Start the feature migration. +Version upgrades sometimes require changes to how features store configuration information and data in system indices. +This API starts the automatic migration process. -* **Request (object):** +Some functionality might be temporarily unavailable during the migration process. - * **`index` (Optional, string | string[])**: List of data streams, indices, and aliases used to limit the request. Supports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`. - * **`allow_no_indices` (Optional, boolean)**: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. - * **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. 
Supports a list of values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. - * **`ignore_unavailable` (Optional, boolean)**: If `false`, the request returns an error if it targets a missing or closed index. +TIP: The API is designed for indirect use by the Upgrade Assistant. We strongly recommend you use the Upgrade Assistant. +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-migration-get-feature-upgrade-status) +```ts +client.migration.postFeatureUpgrade() +``` -### reload_search_analyzers [_reload_search_analyzers] -Reload search analyzers. Reload an index’s search analyzers and their resources. For data streams, the API reloads search analyzers and resources for the stream’s backing indices. +## client.ml.clearTrainedModelDeploymentCache [_ml.clear_trained_model_deployment_cache] +Clear trained model deployment cache. -::::{important} -After reloading the search analyzers you should clear the request cache to make sure it doesn’t contain responses derived from the previous versions of the analyzer. -:::: +Cache will be cleared on all nodes where the trained model is assigned. +A trained model deployment may have an inference cache enabled. +As requests are handled by each allocated node, their responses may be cached on that individual node. +Calling this API clears the caches without restarting the deployment. +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-ml-clear-trained-model-deployment-cache) -You can use the reload search analyzers API to pick up changes to synonym files used in the `synonym_graph` or `synonym` token filter of a search analyzer. To be eligible, the token filter must have an `updateable` flag of `true` and only be used in search analyzers. +```ts +client.ml.clearTrainedModelDeploymentCache({ model_id }) +``` -::::{note} -This API does not perform a reload for each shard of an index. Instead, it performs a reload for each node containing index shards. As a result, the total shard count returned by the API can differ from the number of index shards. Because reloading affects every node with an index shard, it is important to update the synonym file on every data node in the cluster—​including nodes that don’t contain a shard replica—​before using this API. This ensures the synonym file is updated everywhere in the cluster in case shards are relocated in the future. -:::: +### Arguments [_arguments_ml.clear_trained_model_deployment_cache] +#### Request (object) [_request_ml.clear_trained_model_deployment_cache] +- **`model_id` (string)**: The unique identifier of the trained model. -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-reload-search-analyzers) +## client.ml.closeJob [_ml.close_job] +Close anomaly detection jobs. -```ts -client.indices.reloadSearchAnalyzers({ index }) -``` +A job can be opened and closed multiple times throughout its lifecycle. A closed job cannot receive data or perform analysis operations, but you can still explore and navigate results. +When you close a job, it runs housekeeping tasks such as pruning the model history, flushing buffers, calculating final results and persisting the model snapshots. Depending upon the size of the job, it could take several minutes to close and the equivalent time to re-open. After it is closed, the job has a minimal overhead on the cluster except for maintaining its meta data. 
Therefore it is a best practice to close jobs that are no longer required to process data. +If you close an anomaly detection job whose datafeed is running, the request first tries to stop the datafeed. This behavior is equivalent to calling stop datafeed API with the same timeout and force parameters as the close job request. +When a datafeed that has a specified end date stops, it automatically closes its associated job. +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-ml-close-job) -### Arguments [_arguments_211] +```ts +client.ml.closeJob({ job_id }) +``` -* **Request (object):** +### Arguments [_arguments_ml.close_job] - * **`index` (string | string[])**: A list of index names to reload analyzers for - * **`allow_no_indices` (Optional, boolean)**: Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified) - * **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Whether to expand wildcard expression to concrete indices that are open, closed or both. - * **`ignore_unavailable` (Optional, boolean)**: Whether specified concrete indices should be ignored when unavailable (missing or closed) +#### Request (object) [_request_ml.close_job] +- **`job_id` (string)**: Identifier for the anomaly detection job. It can be a job identifier, a group name, or a wildcard expression. You can close multiple anomaly detection jobs in a single API request by using a group name, a list of jobs, or a wildcard expression. You can close all jobs by using `_all` or by specifying `*` as the job identifier. +- **`allow_no_match` (Optional, boolean)**: Refer to the description for the `allow_no_match` query parameter. +- **`force` (Optional, boolean)**: Refer to the description for the `force` query parameter. +- **`timeout` (Optional, string | -1 | 0)**: Refer to the description for the `timeout` query parameter. +## client.ml.deleteCalendar [_ml.delete_calendar] +Delete a calendar. +Remove all scheduled events from a calendar, then delete it. -### resolve_cluster [_resolve_cluster] +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-ml-delete-calendar) -Resolve the cluster. Resolve the specified index expressions to return information about each cluster, including the local cluster, if included. Multiple patterns and remote clusters are supported. +```ts +client.ml.deleteCalendar({ calendar_id }) +``` -This endpoint is useful before doing a cross-cluster search in order to determine which remote clusters should be included in a search. +### Arguments [_arguments_ml.delete_calendar] -You use the same index expression with this endpoint as you would for cross-cluster search. Index and cluster exclusions are also supported with this endpoint. +#### Request (object) [_request_ml.delete_calendar] +- **`calendar_id` (string)**: A string that uniquely identifies a calendar. -For each cluster in the index expression, information is returned about: +## client.ml.deleteCalendarEvent [_ml.delete_calendar_event] +Delete events from a calendar. -* Whether the querying ("local") cluster is currently connected to each remote cluster in the index expression scope. -* Whether each remote cluster is configured with `skip_unavailable` as `true` or `false`. 
-* Whether there are any indices, aliases, or data streams on that cluster that match the index expression. -* Whether the search is likely to have errors returned when you do the cross-cluster search (including any authorization errors if you do not have permission to query the index). -* Cluster version information, including the Elasticsearch server version. +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-ml-delete-calendar-event) -For example, `GET /_resolve/cluster/my-index-*,cluster*:my-index-*` returns information about the local cluster and all remotely configured clusters that start with the alias `cluster*`. Each cluster returns information about whether it has any indices, aliases or data streams that match `my-index-*`. +```ts +client.ml.deleteCalendarEvent({ calendar_id, event_id }) +``` -**Advantages of using this endpoint before a cross-cluster search** +### Arguments [_arguments_ml.delete_calendar_event] -You may want to exclude a cluster or index from a search when: +#### Request (object) [_request_ml.delete_calendar_event] +- **`calendar_id` (string)**: A string that uniquely identifies a calendar. +- **`event_id` (string)**: Identifier for the scheduled event. +You can obtain this identifier by using the get calendar events API. -* A remote cluster is not currently connected and is configured with `skip_unavailable=false`. Running a cross-cluster search under those conditions will cause the entire search to fail. -* A cluster has no matching indices, aliases or data streams for the index expression (or your user does not have permissions to search them). For example, suppose your index expression is `logs*,remote1:logs*` and the remote1 cluster has no indices, aliases or data streams that match `logs*`. In that case, that cluster will return no results from that cluster if you include it in a cross-cluster search. -* The index expression (combined with any query parameters you specify) will likely cause an exception to be thrown when you do the search. In these cases, the "error" field in the `_resolve/cluster` response will be present. (This is also where security/permission errors will be shown.) -* A remote cluster is an older version that does not support the feature you want to use in your search. +## client.ml.deleteCalendarJob [_ml.delete_calendar_job] +Delete anomaly jobs from a calendar. -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-resolve-cluster) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-ml-delete-calendar-job) ```ts -client.indices.resolveCluster({ name }) +client.ml.deleteCalendarJob({ calendar_id, job_id }) ``` +### Arguments [_arguments_ml.delete_calendar_job] -### Arguments [_arguments_212] +#### Request (object) [_request_ml.delete_calendar_job] +- **`calendar_id` (string)**: A string that uniquely identifies a calendar. +- **`job_id` (string | string[])**: An identifier for the anomaly detection jobs. It can be a job identifier, a group name, or a +list of jobs or groups. -* **Request (object):** +## client.ml.deleteDataFrameAnalytics [_ml.delete_data_frame_analytics] +Delete a data frame analytics job. - * **`name` (string | string[])**: Comma-separated name(s) or index pattern(s) of the indices, aliases, and data streams to resolve. Resources on remote clusters can be specified using the ``:`` syntax. 
- * **`allow_no_indices` (Optional, boolean)**: If false, the request returns an error if any wildcard expression, index alias, or _all value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting foo*,bar* returns an error if an index starts with foo but no index starts with bar. - * **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports a list of values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. - * **`ignore_throttled` (Optional, boolean)**: If true, concrete, expanded or aliased indices are ignored when frozen. Defaults to false. - * **`ignore_unavailable` (Optional, boolean)**: If false, the request returns an error if it targets a missing or closed index. Defaults to false. +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-ml-delete-data-frame-analytics) +```ts +client.ml.deleteDataFrameAnalytics({ id }) +``` +### Arguments [_arguments_ml.delete_data_frame_analytics] -### resolve_index [_resolve_index] +#### Request (object) [_request_ml.delete_data_frame_analytics] +- **`id` (string)**: Identifier for the data frame analytics job. +- **`force` (Optional, boolean)**: If `true`, it deletes a job that is not stopped; this method is quicker than stopping and deleting the job. +- **`timeout` (Optional, string | -1 | 0)**: The time to wait for the job to be deleted. -Resolve indices. Resolve the names and/or index patterns for indices, aliases, and data streams. Multiple patterns and remote clusters are supported. +## client.ml.deleteDatafeed [_ml.delete_datafeed] +Delete a datafeed. -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-resolve-index) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-ml-delete-datafeed) ```ts -client.indices.resolveIndex({ name }) +client.ml.deleteDatafeed({ datafeed_id }) ``` +### Arguments [_arguments_ml.delete_datafeed] -### Arguments [_arguments_213] +#### Request (object) [_request_ml.delete_datafeed] +- **`datafeed_id` (string)**: A numerical character string that uniquely identifies the datafeed. This +identifier can contain lowercase alphanumeric characters (a-z and 0-9), +hyphens, and underscores. It must start and end with alphanumeric +characters. +- **`force` (Optional, boolean)**: Use to forcefully delete a started datafeed; this method is quicker than +stopping and deleting the datafeed. -* **Request (object):** +## client.ml.deleteExpiredData [_ml.delete_expired_data] +Delete expired ML data. - * **`name` (string | string[])**: Comma-separated name(s) or index pattern(s) of the indices, aliases, and data streams to resolve. Resources on remote clusters can be specified using the ``:`` syntax. - * **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports a list of values, such as `open,hidden`. 
Valid values are: `all`, `open`, `closed`, `hidden`, `none`. - * **`ignore_unavailable` (Optional, boolean)**: If `false`, the request returns an error if it targets a missing or closed index. - * **`allow_no_indices` (Optional, boolean)**: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. +Delete all job results, model snapshots and forecast data that have exceeded +their retention days period. Machine learning state documents that are not +associated with any job are also deleted. +You can limit the request to a single or set of anomaly detection jobs by +using a job identifier, a group name, a list of jobs, or a +wildcard expression. You can delete expired data for all anomaly detection +jobs by using `_all`, by specifying `*` as the ``, or by omitting the +``. +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-ml-delete-expired-data) +```ts +client.ml.deleteExpiredData({ ... }) +``` -### rollover [_rollover] +### Arguments [_arguments_ml.delete_expired_data] -Roll over to a new index. TIP: It is recommended to use the index lifecycle rollover action to automate rollovers. +#### Request (object) [_request_ml.delete_expired_data] +- **`job_id` (Optional, string)**: Identifier for an anomaly detection job. It can be a job identifier, a +group name, or a wildcard expression. +- **`requests_per_second` (Optional, float)**: The desired requests per second for the deletion processes. The default +behavior is no throttling. +- **`timeout` (Optional, string | -1 | 0)**: How long can the underlying delete processes run until they are canceled. -The rollover API creates a new index for a data stream or index alias. The API behavior depends on the rollover target. +## client.ml.deleteFilter [_ml.delete_filter] +Delete a filter. -**Roll over a data stream** +If an anomaly detection job references the filter, you cannot delete the +filter. You must update or delete the job before you can delete the filter. -If you roll over a data stream, the API creates a new write index for the stream. The stream’s previous write index becomes a regular backing index. A rollover also increments the data stream’s generation. +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-ml-delete-filter) -**Roll over an index alias with a write index** +```ts +client.ml.deleteFilter({ filter_id }) +``` -::::{tip} -Prior to Elasticsearch 7.9, you’d typically use an index alias with a write index to manage time series data. Data streams replace this functionality, require less maintenance, and automatically integrate with data tiers. -:::: +### Arguments [_arguments_ml.delete_filter] +#### Request (object) [_request_ml.delete_filter] +- **`filter_id` (string)**: A string that uniquely identifies a filter. -If an index alias points to multiple indices, one of the indices must be a write index. The rollover API creates a new write index for the alias with `is_write_index` set to `true`. The API also `sets is_write_index` to `false` for the previous write index. +## client.ml.deleteForecast [_ml.delete_forecast] +Delete forecasts from a job. -**Roll over an index alias with one index** +By default, forecasts are retained for 14 days. 
You can specify a +different retention period with the `expires_in` parameter in the forecast +jobs API. The delete forecast API enables you to delete one or more +forecasts before they expire. -If you roll over an index alias that points to only one index, the API creates a new index for the alias and removes the original index from the alias. +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-ml-delete-forecast) -::::{note} -A rollover creates a new index and is subject to the `wait_for_active_shards` setting. -:::: +```ts +client.ml.deleteForecast({ job_id }) +``` +### Arguments [_arguments_ml.delete_forecast] -**Increment index names for an alias** +#### Request (object) [_request_ml.delete_forecast] +- **`job_id` (string)**: Identifier for the anomaly detection job. +- **`forecast_id` (Optional, string)**: A list of forecast identifiers. If you do not specify +this optional parameter or if you specify `_all` or `*` the API deletes +all forecasts from the job. +- **`allow_no_forecasts` (Optional, boolean)**: Specifies whether an error occurs when there are no forecasts. In +particular, if this parameter is set to `false` and there are no +forecasts associated with the job, attempts to delete all forecasts +return an error. +- **`timeout` (Optional, string | -1 | 0)**: Specifies the period of time to wait for the completion of the delete +operation. When this period of time elapses, the API fails and returns an +error. -When you roll over an index alias, you can specify a name for the new index. If you don’t specify a name and the current index ends with `-` and a number, such as `my-index-000001` or `my-index-3`, the new index name increments that number. For example, if you roll over an alias with a current index of `my-index-000001`, the rollover creates a new index named `my-index-000002`. This number is always six characters and zero-padded, regardless of the previous index’s name. +## client.ml.deleteJob [_ml.delete_job] +Delete an anomaly detection job. -If you use an index alias for time series data, you can use date math in the index name to track the rollover date. For example, you can create an alias that points to an index named ``. If you create the index on May 6, 2099, the index’s name is `my-index-2099.05.06-000001`. If you roll over the alias on May 7, 2099, the new index’s name is `my-index-2099.05.07-000002`. +All job configuration, model state and results are deleted. +It is not currently possible to delete multiple jobs using wildcards or a +comma separated list. If you delete a job that has a datafeed, the request +first tries to delete the datafeed. This behavior is equivalent to calling +the delete datafeed API with the same timeout and force parameters as the +delete job request. -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-rollover) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-ml-delete-job) ```ts -client.indices.rollover({ alias }) +client.ml.deleteJob({ job_id }) ``` +### Arguments [_arguments_ml.delete_job] -### Arguments [_arguments_214] +#### Request (object) [_request_ml.delete_job] +- **`job_id` (string)**: Identifier for the anomaly detection job. +- **`force` (Optional, boolean)**: Use to forcefully delete an opened job; this method is quicker than +closing and deleting the job. 
+- **`delete_user_annotations` (Optional, boolean)**: Specifies whether annotations that have been added by the +user should be deleted along with any auto-generated annotations when the job is +reset. +- **`wait_for_completion` (Optional, boolean)**: Specifies whether the request should return immediately or wait until the +job deletion completes. -* **Request (object):** +## client.ml.deleteModelSnapshot [_ml.delete_model_snapshot] +Delete a model snapshot. - * **`alias` (string)**: Name of the data stream or index alias to roll over. - * **`new_index` (Optional, string)**: Name of the index to create. Supports date math. Data streams do not support this parameter. - * **`aliases` (Optional, Record)**: Aliases for the target index. Data streams do not support this parameter. - * **`conditions` (Optional, { min_age, max_age, max_age_millis, min_docs, max_docs, max_size, max_size_bytes, min_size, min_size_bytes, max_primary_shard_size, max_primary_shard_size_bytes, min_primary_shard_size, min_primary_shard_size_bytes, max_primary_shard_docs, min_primary_shard_docs })**: Conditions for the rollover. If specified, Elasticsearch only performs the rollover if the current index satisfies these conditions. If this parameter is not specified, Elasticsearch performs the rollover unconditionally. If conditions are specified, at least one of them must be a `max_*` condition. The index will rollover if any `max_*` condition is satisfied and all `min_*` conditions are satisfied. - * **`mappings` (Optional, { all_field, date_detection, dynamic, dynamic_date_formats, dynamic_templates, _field_names, index_field, _meta, numeric_detection, properties, _routing, _size, _source, runtime, enabled, subobjects, _data_stream_timestamp })**: Mapping for fields in the index. If specified, this mapping can include field names, field data types, and mapping paramaters. - * **`settings` (Optional, Record)**: Configuration options for the index. Data streams do not support this parameter. - * **`dry_run` (Optional, boolean)**: If `true`, checks whether the current index satisfies the specified conditions but does not perform a rollover. - * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. - * **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. - * **`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))**: The number of shard copies that must be active before proceeding with the operation. Set to all or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). +You cannot delete the active model snapshot. To delete that snapshot, first +revert to a different one. To identify the active model snapshot, refer to +the `model_snapshot_id` in the results from the get jobs API. +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-ml-delete-model-snapshot) +```ts +client.ml.deleteModelSnapshot({ job_id, snapshot_id }) +``` -### segments [_segments_2] +### Arguments [_arguments_ml.delete_model_snapshot] -Get index segments. Get low-level information about the Lucene segments in index shards. For data streams, the API returns information about the stream’s backing indices. 
+#### Request (object) [_request_ml.delete_model_snapshot] +- **`job_id` (string)**: Identifier for the anomaly detection job. +- **`snapshot_id` (string)**: Identifier for the model snapshot. -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-segments) +## client.ml.deleteTrainedModel [_ml.delete_trained_model] +Delete an unreferenced trained model. + +The request deletes a trained inference model that is not referenced by an ingest pipeline. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-ml-delete-trained-model) ```ts -client.indices.segments({ ... }) +client.ml.deleteTrainedModel({ model_id }) ``` +### Arguments [_arguments_ml.delete_trained_model] -### Arguments [_arguments_215] +#### Request (object) [_request_ml.delete_trained_model] +- **`model_id` (string)**: The unique identifier of the trained model. +- **`force` (Optional, boolean)**: Forcefully deletes a trained model that is referenced by ingest pipelines or has a started deployment. +- **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. -* **Request (object):** +## client.ml.deleteTrainedModelAlias [_ml.delete_trained_model_alias] +Delete a trained model alias. - * **`index` (Optional, string | string[])**: List of data streams, indices, and aliases used to limit the request. Supports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`. - * **`allow_no_indices` (Optional, boolean)**: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. - * **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports a list of values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. - * **`ignore_unavailable` (Optional, boolean)**: If `false`, the request returns an error if it targets a missing or closed index. +This API deletes an existing model alias that refers to a trained model. If +the model alias is missing or refers to a model other than the one identified +by the `model_id`, this API returns an error. +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-ml-delete-trained-model-alias) +```ts +client.ml.deleteTrainedModelAlias({ model_alias, model_id }) +``` -### shard_stores [_shard_stores] - -Get index shard stores. Get store information about replica shards in one or more indices. For data streams, the API retrieves store information for the stream’s backing indices. +### Arguments [_arguments_ml.delete_trained_model_alias] -The index shard stores API returns the following information: +#### Request (object) [_request_ml.delete_trained_model_alias] +- **`model_alias` (string)**: The model alias to delete. +- **`model_id` (string)**: The trained model ID to which the model alias refers. -* The node on which each replica shard exists. -* The allocation ID for each replica shard. -* A unique ID for each replica shard. 
-* Any errors encountered while opening the shard index or from an earlier failure. +## client.ml.estimateModelMemory [_ml.estimate_model_memory] +Estimate job model memory usage. -By default, the API returns store information only for primary shards that are unassigned or have one or more unassigned replica shards. +Make an estimation of the memory usage for an anomaly detection job model. +The estimate is based on analysis configuration details for the job and cardinality +estimates for the fields it references. -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-shard-stores) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-ml-estimate-model-memory) ```ts -client.indices.shardStores({ ... }) +client.ml.estimateModelMemory({ ... }) ``` +### Arguments [_arguments_ml.estimate_model_memory] -### Arguments [_arguments_216] +#### Request (object) [_request_ml.estimate_model_memory] +- **`analysis_config` (Optional, { bucket_span, categorization_analyzer, categorization_field_name, categorization_filters, detectors, influencers, latency, model_prune_window, multivariate_by_fields, per_partition_categorization, summary_count_field_name })**: For a list of the properties that you can specify in the +`analysis_config` component of the body of this API. +- **`max_bucket_cardinality` (Optional, Record)**: Estimates of the highest cardinality in a single bucket that is observed +for influencer fields over the time period that the job analyzes data. +To produce a good answer, values must be provided for all influencer +fields. Providing values for fields that are not listed as `influencers` +has no effect on the estimation. +- **`overall_cardinality` (Optional, Record)**: Estimates of the cardinality that is observed for fields over the whole +time period that the job analyzes data. To produce a good answer, values +must be provided for fields referenced in the `by_field_name`, +`over_field_name` and `partition_field_name` of any detectors. Providing +values for other fields has no effect on the estimation. It can be +omitted from the request if no detectors have a `by_field_name`, +`over_field_name` or `partition_field_name`. -* **Request (object):** +## client.ml.evaluateDataFrame [_ml.evaluate_data_frame] +Evaluate data frame analytics. - * **`index` (Optional, string | string[])**: List of data streams, indices, and aliases used to limit the request. - * **`allow_no_indices` (Optional, boolean)**: If false, the request returns an error if any wildcard expression, index alias, or _all value targets only missing or closed indices. This behavior applies even if the request targets other open indices. - * **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. - * **`ignore_unavailable` (Optional, boolean)**: If true, missing or closed indices are not included in the response. - * **`status` (Optional, Enum("green" | "yellow" | "red" | "all") | Enum("green" | "yellow" | "red" | "all")[])**: List of shard health statuses used to limit the request. +The API packages together commonly used evaluation metrics for various types +of machine learning features. This has been designed for use on indexes +created by data frame analytics. 
Evaluation requires both a ground truth +field and an analytics result field to be present. +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-ml-evaluate-data-frame) +```ts +client.ml.evaluateDataFrame({ evaluation, index }) +``` -### shrink [_shrink] +### Arguments [_arguments_ml.evaluate_data_frame] -Shrink an index. Shrink an index into a new index with fewer primary shards. +#### Request (object) [_request_ml.evaluate_data_frame] +- **`evaluation` ({ classification, outlier_detection, regression })**: Defines the type of evaluation you want to perform. +- **`index` (string)**: Defines the `index` in which the evaluation will be performed. +- **`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })**: A query clause that retrieves a subset of data from the source index. -Before you can shrink an index: +## client.ml.explainDataFrameAnalytics [_ml.explain_data_frame_analytics] +Explain data frame analytics config. -* The index must be read-only. -* A copy of every shard in the index must reside on the same node. -* The index must have a green health status. +This API provides explanations for a data frame analytics config that either +exists already or one that has not been created yet. The following +explanations are provided: +* which fields are included or not in the analysis and why, +* how much memory is estimated to be required. The estimate can be used when deciding the appropriate value for model_memory_limit setting later on. +If you have object fields or fields that are excluded via source filtering, they are not included in the explanation. -To make shard allocation easier, we recommend you also remove the index’s replica shards. You can later re-add replica shards as part of the shrink operation. +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-ml-explain-data-frame-analytics) -The requested number of primary shards in the target index must be a factor of the number of shards in the source index. For example an index with 8 primary shards can be shrunk into 4, 2 or 1 primary shards or an index with 15 primary shards can be shrunk into 5, 3 or 1. If the number of shards in the index is a prime number it can only be shrunk into a single primary shard Before shrinking, a (primary or replica) copy of every shard in the index must be present on the same node. +```ts +client.ml.explainDataFrameAnalytics({ ... }) +``` -The current write index on a data stream cannot be shrunk. In order to shrink the current write index, the data stream must first be rolled over so that a new write index is created and then the previous write index can be shrunk. 
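+As an illustration, an explanation can be requested for a job configuration that has not been created yet; the index names and the choice of `outlier_detection` below are hypothetical:
+```ts
+// Returns field selection details and a model memory estimate without creating the job.
+const explanation = await client.ml.explainDataFrameAnalytics({
+  source: { index: 'my-source-index' }, // hypothetical source index
+  dest: { index: 'my-dest-index' }, // hypothetical destination index
+  analysis: { outlier_detection: {} } // one of: classification, outlier_detection, regression
+})
+console.log(explanation.field_selection, explanation.memory_estimation)
+```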
+### Arguments [_arguments_ml.explain_data_frame_analytics] + +#### Request (object) [_request_ml.explain_data_frame_analytics] +- **`id` (Optional, string)**: Identifier for the data frame analytics job. This identifier can contain +lowercase alphanumeric characters (a-z and 0-9), hyphens, and +underscores. It must start and end with alphanumeric characters. +- **`source` (Optional, { index, query, runtime_mappings, _source })**: The configuration of how to source the analysis data. It requires an +index. Optionally, query and _source may be specified. +- **`dest` (Optional, { index, results_field })**: The destination configuration, consisting of index and optionally +results_field (ml by default). +- **`analysis` (Optional, { classification, outlier_detection, regression })**: The analysis configuration, which contains the information necessary to +perform one of the following types of analysis: classification, outlier +detection, or regression. +- **`description` (Optional, string)**: A description of the job. +- **`model_memory_limit` (Optional, string)**: The approximate maximum amount of memory resources that are permitted for +analytical processing. If your `elasticsearch.yml` file contains an +`xpack.ml.max_model_memory_limit` setting, an error occurs when you try to +create data frame analytics jobs that have `model_memory_limit` values +greater than that setting. +- **`max_num_threads` (Optional, number)**: The maximum number of threads to be used by the analysis. Using more +threads may decrease the time necessary to complete the analysis at the +cost of using more CPU. Note that the process may use additional threads +for operational functionality other than the analysis itself. +- **`analyzed_fields` (Optional, { includes, excludes })**: Specify includes and/or excludes patterns to select which fields will be +included in the analysis. The patterns specified in excludes are applied +last, therefore excludes takes precedence. In other words, if the same +field is specified in both includes and excludes, then the field will not +be included in the analysis. +- **`allow_lazy_start` (Optional, boolean)**: Specifies whether this job can start when there is insufficient machine +learning node capacity for it to be immediately assigned to a node. + +## client.ml.flushJob [_ml.flush_job] +Force buffered data to be processed. +The flush jobs API is only applicable when sending data for analysis using +the post data API. Depending on the content of the buffer, then it might +additionally calculate new results. Both flush and close operations are +similar, however the flush is more efficient if you are expecting to send +more data for analysis. When flushing, the job remains open and is available +to continue analyzing data. A close operation additionally prunes and +persists the model state to disk and the job must be opened again before +analyzing further data. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-ml-flush-job) -A shrink operation: +```ts +client.ml.flushJob({ job_id }) +``` -* Creates a new target index with the same definition as the source index, but with a smaller number of primary shards. -* Hard-links segments from the source index into the target index. If the file system does not support hard-linking, then all segments are copied into the new index, which is a much more time consuming process. 
Also if using multiple data paths, shards on different data paths require a full copy of segment files if they are not on the same disk since hardlinks do not work across disks. -* Recovers the target index as though it were a closed index which had just been re-opened. Recovers shards to the `.routing.allocation.initial_recovery._id` index setting. +### Arguments [_arguments_ml.flush_job] -::::{important} -Indices can only be shrunk if they satisfy the following requirements: -:::: +#### Request (object) [_request_ml.flush_job] +- **`job_id` (string)**: Identifier for the anomaly detection job. +- **`advance_time` (Optional, string | Unit)**: Refer to the description for the `advance_time` query parameter. +- **`calc_interim` (Optional, boolean)**: Refer to the description for the `calc_interim` query parameter. +- **`end` (Optional, string | Unit)**: Refer to the description for the `end` query parameter. +- **`skip_time` (Optional, string | Unit)**: Refer to the description for the `skip_time` query parameter. +- **`start` (Optional, string | Unit)**: Refer to the description for the `start` query parameter. +## client.ml.forecast [_ml.forecast] +Predict future behavior of a time series. -* The target index must not exist. -* The source index must have more primary shards than the target index. -* The number of primary shards in the target index must be a factor of the number of primary shards in the source index. The source index must have more primary shards than the target index. -* The index must not contain more than 2,147,483,519 documents in total across all shards that will be shrunk into a single shard on the target index as this is the maximum number of docs that can fit into a single shard. -* The node handling the shrink process must have sufficient free disk space to accommodate a second copy of the existing index. +Forecasts are not supported for jobs that perform population analysis; an +error occurs if you try to create a forecast for a job that has an +`over_field_name` in its configuration. Forecasts predict future behavior +based on historical data. -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-shrink) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-ml-forecast) ```ts -client.indices.shrink({ index, target }) +client.ml.forecast({ job_id }) ``` +### Arguments [_arguments_ml.forecast] -### Arguments [_arguments_217] - -* **Request (object):** - - * **`index` (string)**: Name of the source index to shrink. - * **`target` (string)**: Name of the target index to create. - * **`aliases` (Optional, Record)**: The key is the alias name. Index alias names support date math. - * **`settings` (Optional, Record)**: Configuration options for the target index. - * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. - * **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. - * **`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))**: The number of shard copies that must be active before proceeding with the operation. Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`).
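+A minimal usage sketch (the job ID and durations are illustrative; the referenced job must already be open), with the individual request properties described below:
+
+```ts
+// Sketch only: request a one-day forecast for a hypothetical open anomaly detection job
+const forecast = await client.ml.forecast({
+  job_id: 'my-anomaly-job', // hypothetical job ID
+  duration: '1d',
+  expires_in: '7d',
+})
+console.log(forecast)
+```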
+#### Request (object) [_request_ml.forecast] +- **`job_id` (string)**: Identifier for the anomaly detection job. The job must be open when you +create a forecast; otherwise, an error occurs. +- **`duration` (Optional, string | -1 | 0)**: Refer to the description for the `duration` query parameter. +- **`expires_in` (Optional, string | -1 | 0)**: Refer to the description for the `expires_in` query parameter. +- **`max_model_memory` (Optional, string)**: Refer to the description for the `max_model_memory` query parameter. +## client.ml.getBuckets [_ml.get_buckets] +Get anomaly detection job results for buckets. +The API presents a chronological view of the records, grouped by bucket. +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-ml-get-buckets) -### simulate_index_template [_simulate_index_template] +```ts +client.ml.getBuckets({ job_id }) +``` -Simulate an index. Get the index configuration that would be applied to the specified index from an existing index template. +### Arguments [_arguments_ml.get_buckets] + +#### Request (object) [_request_ml.get_buckets] +- **`job_id` (string)**: Identifier for the anomaly detection job. +- **`timestamp` (Optional, string | Unit)**: The timestamp of a single bucket result. If you do not specify this +parameter, the API returns information about all buckets. +- **`anomaly_score` (Optional, number)**: Refer to the description for the `anomaly_score` query parameter. +- **`desc` (Optional, boolean)**: Refer to the description for the `desc` query parameter. +- **`end` (Optional, string | Unit)**: Refer to the description for the `end` query parameter. +- **`exclude_interim` (Optional, boolean)**: Refer to the description for the `exclude_interim` query parameter. +- **`expand` (Optional, boolean)**: Refer to the description for the `expand` query parameter. +- **`page` (Optional, { from, size })** +- **`sort` (Optional, string)**: Refer to the description for the `sort` query parameter. +- **`start` (Optional, string | Unit)**: Refer to the description for the `start` query parameter. +- **`from` (Optional, number)**: Skips the specified number of buckets. +- **`size` (Optional, number)**: Specifies the maximum number of buckets to obtain. + +## client.ml.getCalendarEvents [_ml.get_calendar_events] +Get info about events in calendars. -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-simulate-index-template) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-ml-get-calendar-events) ```ts -client.indices.simulateIndexTemplate({ name }) +client.ml.getCalendarEvents({ calendar_id }) ``` +### Arguments [_arguments_ml.get_calendar_events] -### Arguments [_arguments_218] +#### Request (object) [_request_ml.get_calendar_events] +- **`calendar_id` (string)**: A string that uniquely identifies a calendar. You can get information for multiple calendars by using a list of ids or a wildcard expression. You can get information for all calendars by using `_all` or `*` or by omitting the calendar identifier. +- **`end` (Optional, string | Unit)**: Specifies to get events with timestamps earlier than this time. +- **`from` (Optional, number)**: Skips the specified number of events. +- **`job_id` (Optional, string)**: Specifies to get events for a specific anomaly detection job identifier or job group. It must be used with a calendar identifier of `_all` or `*`.
+- **`size` (Optional, number)**: Specifies the maximum number of events to obtain. +- **`start` (Optional, string | Unit)**: Specifies to get events with timestamps after this time. -* **Request (object):** +## client.ml.getCalendars [_ml.get_calendars] +Get calendar configuration info. - * **`name` (string)**: Name of the index to simulate - * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. - * **`include_defaults` (Optional, boolean)**: If true, returns all relevant default configurations for the index template. +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-ml-get-calendars) +```ts +client.ml.getCalendars({ ... }) +``` +### Arguments [_arguments_ml.get_calendars] -### simulate_template [_simulate_template] +#### Request (object) [_request_ml.get_calendars] +- **`calendar_id` (Optional, string)**: A string that uniquely identifies a calendar. You can get information for multiple calendars by using a list of ids or a wildcard expression. You can get information for all calendars by using `_all` or `*` or by omitting the calendar identifier. +- **`page` (Optional, { from, size })**: This object is supported only when you omit the calendar identifier. +- **`from` (Optional, number)**: Skips the specified number of calendars. This parameter is supported only when you omit the calendar identifier. +- **`size` (Optional, number)**: Specifies the maximum number of calendars to obtain. This parameter is supported only when you omit the calendar identifier. -Simulate an index template. Get the index configuration that would be applied by a particular index template. +## client.ml.getCategories [_ml.get_categories] +Get anomaly detection job results for categories. -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-simulate-template) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-ml-get-categories) ```ts -client.indices.simulateTemplate({ ... }) +client.ml.getCategories({ job_id }) ``` +### Arguments [_arguments_ml.get_categories] -### Arguments [_arguments_219] - -* **Request (object):** +#### Request (object) [_request_ml.get_categories] +- **`job_id` (string)**: Identifier for the anomaly detection job. +- **`category_id` (Optional, string)**: Identifier for the category, which is unique in the job. If you specify +neither the category ID nor the partition_field_value, the API returns +information about all categories. If you specify only the +partition_field_value, it returns information about all categories for +the specified partition. +- **`page` (Optional, { from, size })**: Configures pagination. +This parameter has the `from` and `size` properties. +- **`from` (Optional, number)**: Skips the specified number of categories. +- **`partition_field_value` (Optional, string)**: Only return categories for the specified partition. +- **`size` (Optional, number)**: Specifies the maximum number of categories to obtain. - * **`name` (Optional, string)**: Name of the index template to simulate. To test a template configuration before you add it to the cluster, omit this parameter and specify the template configuration in the request body. - * **`allow_auto_create` (Optional, boolean)**: This setting overrides the value of the `action.auto_create_index` cluster setting. 
If set to `true` in a template, then indices can be automatically created using that template even if auto-creation of indices is disabled via `actions.auto_create_index`. If set to `false`, then indices or data streams matching the template must always be explicitly created, and may never be automatically created. - * **`index_patterns` (Optional, string | string[])**: Array of wildcard (`*`) expressions used to match the names of data streams and indices during creation. - * **`composed_of` (Optional, string[])**: An ordered list of component template names. Component templates are merged in the order specified, meaning that the last component template specified has the highest precedence. - * **`template` (Optional, { aliases, mappings, settings, lifecycle })**: Template to be applied. It may optionally include an `aliases`, `mappings`, or `settings` configuration. - * **`data_stream` (Optional, { hidden, allow_custom_routing })**: If this object is included, the template is used to create data streams and their backing indices. Supports an empty object. Data streams require a matching index template with a `data_stream` object. - * **`priority` (Optional, number)**: Priority to determine index template precedence when a new data stream or index is created. The index template with the highest priority is chosen. If no priority is specified the template is treated as though it is of priority 0 (lowest priority). This number is not automatically generated by Elasticsearch. - * **`version` (Optional, number)**: Version number used to manage index templates externally. This number is not automatically generated by Elasticsearch. - * **`_meta` (Optional, Record)**: Optional user metadata about the index template. May have any contents. This map is not automatically generated by Elasticsearch. - * **`ignore_missing_component_templates` (Optional, string[])**: The configuration option ignore_missing_component_templates can be used when an index template references a component template that might not exist - * **`deprecated` (Optional, boolean)**: Marks this index template as deprecated. When creating or updating a non-deprecated index template that uses deprecated components, Elasticsearch will emit a deprecation warning. - * **`create` (Optional, boolean)**: If true, the template passed in the body is only used if no existing templates match the same index patterns. If false, the simulation uses the template with the highest priority. Note that the template is not permanently added or updated in either case; it is only used for the simulation. - * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. - * **`include_defaults` (Optional, boolean)**: If true, returns all relevant default configurations for the index template. +## client.ml.getDataFrameAnalytics [_ml.get_data_frame_analytics] +Get data frame analytics job configuration info. +You can get information for multiple data frame analytics jobs in a single +API request by using a list of data frame analytics jobs or a +wildcard expression. +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-ml-get-data-frame-analytics) +```ts +client.ml.getDataFrameAnalytics({ ... }) +``` -### split [_split] +### Arguments [_arguments_ml.get_data_frame_analytics] -Split an index. Split an index into a new index with more primary shards. 
* Before you can split an index: +#### Request (object) [_request_ml.get_data_frame_analytics] +- **`id` (Optional, string)**: Identifier for the data frame analytics job. If you do not specify this +option, the API returns information for the first hundred data frame +analytics jobs. +- **`allow_no_match` (Optional, boolean)**: Specifies what to do when the request: -* The index must be read-only. -* The cluster health status must be green. +1. Contains wildcard expressions and there are no data frame analytics +jobs that match. +2. Contains the `_all` string or no identifiers and there are no matches. +3. Contains wildcard expressions and there are only partial matches. -You can do make an index read-only with the following request using the add index block API: +The default value returns an empty data_frame_analytics array when there +are no matches and the subset of results when there are partial matches. +If this parameter is `false`, the request returns a 404 status code when +there are no matches or only partial matches. +- **`from` (Optional, number)**: Skips the specified number of data frame analytics jobs. +- **`size` (Optional, number)**: Specifies the maximum number of data frame analytics jobs to obtain. +- **`exclude_generated` (Optional, boolean)**: Indicates if certain fields should be removed from the configuration on +retrieval. This allows the configuration to be in an acceptable format to +be retrieved and then added to another cluster. -``` -PUT /my_source_index/_block/write -``` +## client.ml.getDataFrameAnalyticsStats [_ml.get_data_frame_analytics_stats] +Get data frame analytics job stats. -The current write index on a data stream cannot be split. In order to split the current write index, the data stream must first be rolled over so that a new write index is created and then the previous write index can be split. +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-ml-get-data-frame-analytics-stats) -The number of times the index can be split (and the number of shards that each original shard can be split into) is determined by the `index.number_of_routing_shards` setting. The number of routing shards specifies the hashing space that is used internally to distribute documents across shards with consistent hashing. For instance, a 5 shard index with `number_of_routing_shards` set to 30 (5 x 2 x 3) could be split by a factor of 2 or 3. +```ts +client.ml.getDataFrameAnalyticsStats({ ... }) +``` -A split operation: +### Arguments [_arguments_ml.get_data_frame_analytics_stats] -* Creates a new target index with the same definition as the source index, but with a larger number of primary shards. -* Hard-links segments from the source index into the target index. If the file system doesn’t support hard-linking, all segments are copied into the new index, which is a much more time consuming process. -* Hashes all documents again, after low level files are created, to delete documents that belong to a different shard. -* Recovers the target index as though it were a closed index which had just been re-opened. +#### Request (object) [_request_ml.get_data_frame_analytics_stats] +- **`id` (Optional, string)**: Identifier for the data frame analytics job. If you do not specify this +option, the API returns information for the first hundred data frame +analytics jobs. 
+- **`allow_no_match` (Optional, boolean)**: Specifies what to do when the request: -::::{important} -Indices can only be split if they satisfy the following requirements: -:::: +1. Contains wildcard expressions and there are no data frame analytics +jobs that match. +2. Contains the `_all` string or no identifiers and there are no matches. +3. Contains wildcard expressions and there are only partial matches. +The default value returns an empty data_frame_analytics array when there +are no matches and the subset of results when there are partial matches. +If this parameter is `false`, the request returns a 404 status code when +there are no matches or only partial matches. +- **`from` (Optional, number)**: Skips the specified number of data frame analytics jobs. +- **`size` (Optional, number)**: Specifies the maximum number of data frame analytics jobs to obtain. +- **`verbose` (Optional, boolean)**: Defines whether the stats response should be verbose. -* The target index must not exist. -* The source index must have fewer primary shards than the target index. -* The number of primary shards in the target index must be a multiple of the number of primary shards in the source index. -* The node handling the split process must have sufficient free disk space to accommodate a second copy of the existing index. +## client.ml.getDatafeedStats [_ml.get_datafeed_stats] +Get datafeed stats. +You can get statistics for multiple datafeeds in a single API request by +using a list of datafeeds or a wildcard expression. You can +get statistics for all datafeeds by using `_all`, by specifying `*` as the +``, or by omitting the ``. If the datafeed is stopped, the +only information you receive is the `datafeed_id` and the `state`. +This API returns a maximum of 10,000 datafeeds. -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-split) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-ml-get-datafeed-stats) ```ts -client.indices.split({ index, target }) +client.ml.getDatafeedStats({ ... }) ``` +### Arguments [_arguments_ml.get_datafeed_stats] -### Arguments [_arguments_220] +#### Request (object) [_request_ml.get_datafeed_stats] +- **`datafeed_id` (Optional, string | string[])**: Identifier for the datafeed. It can be a datafeed identifier or a +wildcard expression. If you do not specify one of these options, the API +returns information about all datafeeds. +- **`allow_no_match` (Optional, boolean)**: Specifies what to do when the request: -* **Request (object):** +1. Contains wildcard expressions and there are no datafeeds that match. +2. Contains the `_all` string or no identifiers and there are no matches. +3. Contains wildcard expressions and there are only partial matches. - * **`index` (string)**: Name of the source index to split. - * **`target` (string)**: Name of the target index to create. - * **`aliases` (Optional, Record)**: Aliases for the resulting index. - * **`settings` (Optional, Record)**: Configuration options for the target index. - * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. - * **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. 
- * **`wait_for_active_shards` (Optional, number | Enum("all" | "index-setting"))**: The number of shard copies that must be active before proceeding with the operation. Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). +The default value is `true`, which returns an empty `datafeeds` array +when there are no matches and the subset of results when there are +partial matches. If this parameter is `false`, the request returns a +`404` status code when there are no matches or only partial matches. +## client.ml.getDatafeeds [_ml.get_datafeeds] +Get datafeeds configuration info. +You can get information for multiple datafeeds in a single API request by +using a list of datafeeds or a wildcard expression. You can +get information for all datafeeds by using `_all`, by specifying `*` as the +``, or by omitting the ``. +This API returns a maximum of 10,000 datafeeds. +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-ml-get-datafeeds) -### stats [_stats_4] +```ts +client.ml.getDatafeeds({ ... }) +``` -Get index statistics. For data streams, the API retrieves statistics for the stream’s backing indices. +### Arguments [_arguments_ml.get_datafeeds] -By default, the returned statistics are index-level with `primaries` and `total` aggregations. `primaries` are the values for only the primary shards. `total` are the accumulated values for both primary and replica shards. +#### Request (object) [_request_ml.get_datafeeds] +- **`datafeed_id` (Optional, string | string[])**: Identifier for the datafeed. It can be a datafeed identifier or a +wildcard expression. If you do not specify one of these options, the API +returns information about all datafeeds. +- **`allow_no_match` (Optional, boolean)**: Specifies what to do when the request: -To get shard-level statistics, set the `level` parameter to `shards`. +1. Contains wildcard expressions and there are no datafeeds that match. +2. Contains the `_all` string or no identifiers and there are no matches. +3. Contains wildcard expressions and there are only partial matches. -::::{note} -When moving to another node, the shard-level statistics for a shard are cleared. Although the shard is no longer part of the node, that node retains any node-level statistics to which the shard contributed. -:::: +The default value is `true`, which returns an empty `datafeeds` array +when there are no matches and the subset of results when there are +partial matches. If this parameter is `false`, the request returns a +`404` status code when there are no matches or only partial matches. +- **`exclude_generated` (Optional, boolean)**: Indicates if certain fields should be removed from the configuration on +retrieval. This allows the configuration to be in an acceptable format to +be retrieved and then added to another cluster. +## client.ml.getFilters [_ml.get_filters] +Get filters. +You can get a single filter or all filters. -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-stats) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-ml-get-filters) ```ts -client.indices.stats({ ... }) +client.ml.getFilters({ ... }) ``` +### Arguments [_arguments_ml.get_filters] -### Arguments [_arguments_221] +#### Request (object) [_request_ml.get_filters] +- **`filter_id` (Optional, string | string[])**: A string that uniquely identifies a filter. 
+- **`from` (Optional, number)**: Skips the specified number of filters. +- **`size` (Optional, number)**: Specifies the maximum number of filters to obtain. -* **Request (object):** +## client.ml.getInfluencers [_ml.get_influencers] +Get anomaly detection job results for influencers. +Influencers are the entities that have contributed to, or are to blame for, +the anomalies. Influencer results are available only if an +`influencer_field_name` is specified in the job configuration. - * **`metric` (Optional, string | string[])**: Limit the information returned the specific metrics. - * **`index` (Optional, string | string[])**: A list of index names; use `_all` or empty string to perform the operation on all indices - * **`completion_fields` (Optional, string | string[])**: List or wildcard expressions of fields to include in fielddata and suggest statistics. - * **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports a list of values, such as `open,hidden`. - * **`fielddata_fields` (Optional, string | string[])**: List or wildcard expressions of fields to include in fielddata statistics. - * **`fields` (Optional, string | string[])**: List or wildcard expressions of fields to include in the statistics. - * **`forbid_closed_indices` (Optional, boolean)**: If true, statistics are not collected from closed indices. - * **`groups` (Optional, string | string[])**: List of search groups to include in the search statistics. - * **`include_segment_file_sizes` (Optional, boolean)**: If true, the call reports the aggregated disk usage of each one of the Lucene index files (only applies if segment stats are requested). - * **`include_unloaded_segments` (Optional, boolean)**: If true, the response includes information from segments that are not loaded into memory. - * **`level` (Optional, Enum("cluster" | "indices" | "shards"))**: Indicates whether statistics are aggregated at the cluster, index, or shard level. +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-ml-get-influencers) +```ts +client.ml.getInfluencers({ job_id }) +``` +### Arguments [_arguments_ml.get_influencers] -### update_aliases [_update_aliases] +#### Request (object) [_request_ml.get_influencers] +- **`job_id` (string)**: Identifier for the anomaly detection job. +- **`page` (Optional, { from, size })**: Configures pagination. +This parameter has the `from` and `size` properties. +- **`desc` (Optional, boolean)**: If true, the results are sorted in descending order. +- **`end` (Optional, string | Unit)**: Returns influencers with timestamps earlier than this time. +The default value means it is unset and results are not limited to +specific timestamps. +- **`exclude_interim` (Optional, boolean)**: If true, the output excludes interim results. By default, interim results +are included. +- **`influencer_score` (Optional, number)**: Returns influencers with anomaly scores greater than or equal to this +value. +- **`from` (Optional, number)**: Skips the specified number of influencers. +- **`size` (Optional, number)**: Specifies the maximum number of influencers to obtain. +- **`sort` (Optional, string)**: Specifies the sort field for the requested influencers. 
By default, the +influencers are sorted by the `influencer_score` value. +- **`start` (Optional, string | Unit)**: Returns influencers with timestamps after this time. The default value +means it is unset and results are not limited to specific timestamps. -Create or update an alias. Adds a data stream or index to an alias. +## client.ml.getJobStats [_ml.get_job_stats] +Get anomaly detection job stats. -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-update-aliases) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-ml-get-job-stats) ```ts -client.indices.updateAliases({ ... }) +client.ml.getJobStats({ ... }) ``` +### Arguments [_arguments_ml.get_job_stats] -### Arguments [_arguments_222] - -* **Request (object):** +#### Request (object) [_request_ml.get_job_stats] +- **`job_id` (Optional, string)**: Identifier for the anomaly detection job. It can be a job identifier, a +group name, a list of jobs, or a wildcard expression. If +you do not specify one of these options, the API returns information for +all anomaly detection jobs. +- **`allow_no_match` (Optional, boolean)**: Specifies what to do when the request: - * **`actions` (Optional, { add_backing_index, remove_backing_index }[])**: Actions to perform. - * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. - * **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. +1. Contains wildcard expressions and there are no jobs that match. +2. Contains the _all string or no identifiers and there are no matches. +3. Contains wildcard expressions and there are only partial matches. +If `true`, the API returns an empty `jobs` array when +there are no matches and the subset of results when there are partial +matches. If `false`, the API returns a `404` status +code when there are no matches or only partial matches. +## client.ml.getJobs [_ml.get_jobs] +Get anomaly detection jobs configuration info. +You can get information for multiple anomaly detection jobs in a single API +request by using a group name, a list of jobs, or a wildcard +expression. You can get information for all anomaly detection jobs by using +`_all`, by specifying `*` as the ``, or by omitting the ``. -### validate_query [_validate_query] - -Validate a query. Validates a query without running it. - -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-validate-query) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-ml-get-jobs) ```ts -client.indices.validateQuery({ ... }) +client.ml.getJobs({ ... }) ``` +### Arguments [_arguments_ml.get_jobs] -### Arguments [_arguments_223] +#### Request (object) [_request_ml.get_jobs] +- **`job_id` (Optional, string | string[])**: Identifier for the anomaly detection job. It can be a job identifier, a +group name, or a wildcard expression. If you do not specify one of these +options, the API returns information for all anomaly detection jobs. +- **`allow_no_match` (Optional, boolean)**: Specifies what to do when the request: -* **Request (object):** +1. Contains wildcard expressions and there are no jobs that match. +2. Contains the _all string or no identifiers and there are no matches. +3. 
Contains wildcard expressions and there are only partial matches. - * **`index` (Optional, string | string[])**: List of data streams, indices, and aliases to search. Supports wildcards (`*`). To search all data streams or indices, omit this parameter or use `*` or `_all`. - * **`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })**: Query in the Lucene query string syntax. - * **`allow_no_indices` (Optional, boolean)**: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. - * **`all_shards` (Optional, boolean)**: If `true`, the validation is executed on all shards instead of one random shard per index. - * **`analyzer` (Optional, string)**: Analyzer to use for the query string. This parameter can only be used when the `q` query string parameter is specified. - * **`analyze_wildcard` (Optional, boolean)**: If `true`, wildcard and prefix queries are analyzed. - * **`default_operator` (Optional, Enum("and" | "or"))**: The default operator for query string query: `AND` or `OR`. - * **`df` (Optional, string)**: Field to use as default where no field prefix is given in the query string. This parameter can only be used when the `q` query string parameter is specified. - * **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports a list of values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. - * **`explain` (Optional, boolean)**: If `true`, the response returns detailed information if an error has occurred. - * **`ignore_unavailable` (Optional, boolean)**: If `false`, the request returns an error if it targets a missing or closed index. - * **`lenient` (Optional, boolean)**: If `true`, format-based query failures (such as providing text to a numeric field) in the query string will be ignored. - * **`rewrite` (Optional, boolean)**: If `true`, returns a more detailed explanation showing the actual Lucene query that will be executed. - * **`q` (Optional, string)**: Query in the Lucene query string syntax. +The default value is `true`, which returns an empty `jobs` array when +there are no matches and the subset of results when there are partial +matches. If this parameter is `false`, the request returns a `404` status +code when there are no matches or only partial matches. +- **`exclude_generated` (Optional, boolean)**: Indicates if certain fields should be removed from the configuration on +retrieval. 
This allows the configuration to be in an acceptable format to +be retrieved and then added to another cluster. +## client.ml.getMemoryStats [_ml.get_memory_stats] +Get machine learning memory usage info. +Get information about how machine learning jobs and trained models are using memory, +on each node, both within the JVM heap, and natively, outside of the JVM. +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-ml-get-memory-stats) -## inference [_inference] +```ts +client.ml.getMemoryStats({ ... }) +``` +### Arguments [_arguments_ml.get_memory_stats] -### delete [_delete_6] +#### Request (object) [_request_ml.get_memory_stats] +- **`node_id` (Optional, string)**: The names of particular nodes in the cluster to target. For example, `nodeId1,nodeId2` or +`ml:true` +- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout +expires, the request fails and returns an error. +- **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request +fails and returns an error. -Delete an inference endpoint +## client.ml.getModelSnapshotUpgradeStats [_ml.get_model_snapshot_upgrade_stats] +Get anomaly detection job model snapshot upgrade usage info. -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-delete) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-ml-get-model-snapshot-upgrade-stats) ```ts -client.inference.delete({ inference_id }) +client.ml.getModelSnapshotUpgradeStats({ job_id, snapshot_id }) ``` +### Arguments [_arguments_ml.get_model_snapshot_upgrade_stats] -### Arguments [_arguments_224] +#### Request (object) [_request_ml.get_model_snapshot_upgrade_stats] +- **`job_id` (string)**: Identifier for the anomaly detection job. +- **`snapshot_id` (string)**: A numerical character string that uniquely identifies the model snapshot. You can get information for multiple +snapshots by using a list or a wildcard expression. You can get all snapshots by using `_all`, +by specifying `*` as the snapshot ID, or by omitting the snapshot ID. +- **`allow_no_match` (Optional, boolean)**: Specifies what to do when the request: -* **Request (object):** + - Contains wildcard expressions and there are no jobs that match. + - Contains the _all string or no identifiers and there are no matches. + - Contains wildcard expressions and there are only partial matches. - * **`inference_id` (string)**: The inference Id - * **`task_type` (Optional, Enum("sparse_embedding" | "text_embedding" | "rerank" | "completion"))**: The task type - * **`dry_run` (Optional, boolean)**: When true, the endpoint is not deleted, and a list of ingest processors which reference this endpoint is returned - * **`force` (Optional, boolean)**: When true, the inference endpoint is forcefully deleted even if it is still being used by ingest processors or semantic text fields +The default value is true, which returns an empty jobs array when there are no matches and the subset of results +when there are partial matches. If this parameter is false, the request returns a 404 status code when there are +no matches or only partial matches. +## client.ml.getModelSnapshots [_ml.get_model_snapshots] +Get model snapshots info. 
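+A typical call pages through the snapshots of one job, as in this hedged sketch (the job ID and paging values are illustrative; the accepted parameters are listed below):
+
+```ts
+// Sketch only: fetch the five most recent model snapshots for a hypothetical job
+const snapshots = await client.ml.getModelSnapshots({
+  job_id: 'my-anomaly-job', // hypothetical job ID
+  sort: 'timestamp',
+  desc: true,
+  size: 5,
+})
+console.log(snapshots.count)
+```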
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-ml-get-model-snapshots) -### get [_get_6] +```ts +client.ml.getModelSnapshots({ job_id }) +``` -Get an inference endpoint +### Arguments [_arguments_ml.get_model_snapshots] -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-get) +#### Request (object) [_request_ml.get_model_snapshots] +- **`job_id` (string)**: Identifier for the anomaly detection job. +- **`snapshot_id` (Optional, string)**: A numerical character string that uniquely identifies the model snapshot. You can get information for multiple +snapshots by using a list or a wildcard expression. You can get all snapshots by using `_all`, +by specifying `*` as the snapshot ID, or by omitting the snapshot ID. +- **`desc` (Optional, boolean)**: Refer to the description for the `desc` query parameter. +- **`end` (Optional, string | Unit)**: Refer to the description for the `end` query parameter. +- **`page` (Optional, { from, size })** +- **`sort` (Optional, string)**: Refer to the description for the `sort` query parameter. +- **`start` (Optional, string | Unit)**: Refer to the description for the `start` query parameter. +- **`from` (Optional, number)**: Skips the specified number of snapshots. +- **`size` (Optional, number)**: Specifies the maximum number of snapshots to obtain. -```ts -client.inference.get({ ... }) -``` +## client.ml.getOverallBuckets [_ml.get_overall_buckets] +Get overall bucket results. +Retrievs overall bucket results that summarize the bucket results of +multiple anomaly detection jobs. -### Arguments [_arguments_225] +The `overall_score` is calculated by combining the scores of all the +buckets within the overall bucket span. First, the maximum +`anomaly_score` per anomaly detection job in the overall bucket is +calculated. Then the `top_n` of those scores are averaged to result in +the `overall_score`. This means that you can fine-tune the +`overall_score` so that it is more or less sensitive to the number of +jobs that detect an anomaly at the same time. For example, if you set +`top_n` to `1`, the `overall_score` is the maximum bucket score in the +overall bucket. Alternatively, if you set `top_n` to the number of jobs, +the `overall_score` is high only when all jobs detect anomalies in that +overall bucket. If you set the `bucket_span` parameter (to a value +greater than its default), the `overall_score` is the maximum +`overall_score` of the overall buckets that have a span equal to the +jobs' largest bucket span. -* **Request (object):** +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-ml-get-overall-buckets) - * **`task_type` (Optional, Enum("sparse_embedding" | "text_embedding" | "rerank" | "completion"))**: The task type - * **`inference_id` (Optional, string)**: The inference Id +```ts +client.ml.getOverallBuckets({ job_id }) +``` +### Arguments [_arguments_ml.get_overall_buckets] +#### Request (object) [_request_ml.get_overall_buckets] +- **`job_id` (string)**: Identifier for the anomaly detection job. It can be a job identifier, a +group name, a list of jobs or groups, or a wildcard +expression. -### inference [_inference_2] +You can summarize the bucket results for all anomaly detection jobs by +using `_all` or by specifying `*` as the ``. +- **`allow_no_match` (Optional, boolean)**: Refer to the description for the `allow_no_match` query parameter. 
+- **`bucket_span` (Optional, string | -1 | 0)**: Refer to the description for the `bucket_span` query parameter. +- **`end` (Optional, string | Unit)**: Refer to the description for the `end` query parameter. +- **`exclude_interim` (Optional, boolean)**: Refer to the description for the `exclude_interim` query parameter. +- **`overall_score` (Optional, number | string)**: Refer to the description for the `overall_score` query parameter. +- **`start` (Optional, string | Unit)**: Refer to the description for the `start` query parameter. +- **`top_n` (Optional, number)**: Refer to the description for the `top_n` query parameter. -Perform inference on the service +## client.ml.getRecords [_ml.get_records] +Get anomaly records for an anomaly detection job. +Records contain the detailed analytical results. They describe the anomalous +activity that has been identified in the input data based on the detector +configuration. +There can be many anomaly records depending on the characteristics and size +of the input data. In practice, there are often too many to be able to +manually process them. The machine learning features therefore perform a +sophisticated aggregation of the anomaly records into buckets. +The number of record results depends on the number of anomalies found in each +bucket, which relates to the number of time series being modeled and the +number of detectors. -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-inference) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-ml-get-records) ```ts -client.inference.inference({ inference_id, input }) +client.ml.getRecords({ job_id }) ``` +### Arguments [_arguments_ml.get_records] + +#### Request (object) [_request_ml.get_records] +- **`job_id` (string)**: Identifier for the anomaly detection job. +- **`desc` (Optional, boolean)**: Refer to the description for the `desc` query parameter. +- **`end` (Optional, string | Unit)**: Refer to the description for the `end` query parameter. +- **`exclude_interim` (Optional, boolean)**: Refer to the description for the `exclude_interim` query parameter. +- **`page` (Optional, { from, size })** +- **`record_score` (Optional, number)**: Refer to the description for the `record_score` query parameter. +- **`sort` (Optional, string)**: Refer to the description for the `sort` query parameter. +- **`start` (Optional, string | Unit)**: Refer to the description for the `start` query parameter. +- **`from` (Optional, number)**: Skips the specified number of records. +- **`size` (Optional, number)**: Specifies the maximum number of records to obtain. -### Arguments [_arguments_226] +## client.ml.getTrainedModels [_ml.get_trained_models] +Get trained model configuration info. -* **Request (object):** +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-ml-get-trained-models) - * **`inference_id` (string)**: The inference Id - * **`input` (string | string[])**: Inference input. Either a string or an array of strings. - * **`task_type` (Optional, Enum("sparse_embedding" | "text_embedding" | "rerank" | "completion"))**: The task type - * **`query` (Optional, string)**: Query input, required for rerank task. Not required for other tasks. - * **`task_settings` (Optional, User-defined value)**: Optional task settings - * **`timeout` (Optional, string | -1 | 0)**: Specifies the amount of time to wait for the inference request to complete. 
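+Beyond the bare signature shown below, calls commonly combine paging and filtering options; a hedged sketch (values are illustrative only):
+
+```ts
+// Sketch only: list trained model configurations without generated fields
+const models = await client.ml.getTrainedModels({
+  size: 50,
+  exclude_generated: true,
+})
+console.log(models.count)
+```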
+```ts +client.ml.getTrainedModels({ ... }) +``` +### Arguments [_arguments_ml.get_trained_models] +#### Request (object) [_request_ml.get_trained_models] +- **`model_id` (Optional, string | string[])**: The unique identifier of the trained model or a model alias. -### put [_put_2] +You can get information for multiple trained models in a single API +request by using a list of model IDs or a wildcard +expression. +- **`allow_no_match` (Optional, boolean)**: Specifies what to do when the request: -Create an inference endpoint. When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. After creating the endpoint, wait for the model deployment to complete before using it. To verify the deployment status, use the get trained model statistics API. Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources. +- Contains wildcard expressions and there are no models that match. +- Contains the _all string or no identifiers and there are no matches. +- Contains wildcard expressions and there are only partial matches. -::::{important} -The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Mistral, Azure OpenAI, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face. For built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models. However, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs. -:::: +If true, it returns an empty array when there are no matches and the +subset of results when there are partial matches. +- **`decompress_definition` (Optional, boolean)**: Specifies whether the included model definition should be returned as a +JSON map (true) or in a custom compressed format (false). +- **`exclude_generated` (Optional, boolean)**: Indicates if certain fields should be removed from the configuration on +retrieval. This allows the configuration to be in an acceptable format to +be retrieved and then added to another cluster. +- **`from` (Optional, number)**: Skips the specified number of models. +- **`include` (Optional, Enum("definition" | "feature_importance_baseline" | "hyperparameters" | "total_feature_importance" | "definition_status"))**: A comma delimited string of optional fields to include in the response +body. +- **`size` (Optional, number)**: Specifies the maximum number of models to obtain. +- **`tags` (Optional, string | string[])**: A comma delimited string of tags. A trained model can have many tags, or +none. When supplied, only trained models that contain all the supplied +tags are returned. +## client.ml.getTrainedModelsStats [_ml.get_trained_models_stats] +Get trained models usage info. +You can get usage information for multiple trained +models in a single API request by using a list of model IDs or a wildcard expression. 
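+For instance (the wildcard below is a placeholder), usage statistics can be requested for every model in one call; the endpoint reference and parameters follow:
+
+```ts
+// Sketch only: fetch usage stats for all trained models
+const stats = await client.ml.getTrainedModelsStats({
+  model_id: '*',
+  allow_no_match: true,
+})
+console.log(stats.count)
+```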
-[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-ml-get-trained-models-stats) ```ts -client.inference.put({ inference_id }) +client.ml.getTrainedModelsStats({ ... }) ``` +### Arguments [_arguments_ml.get_trained_models_stats] -### Arguments [_arguments_227] +#### Request (object) [_request_ml.get_trained_models_stats] +- **`model_id` (Optional, string | string[])**: The unique identifier of the trained model or a model alias. It can be a +list or a wildcard expression. +- **`allow_no_match` (Optional, boolean)**: Specifies what to do when the request: -* **Request (object):** - - * **`inference_id` (string)**: The inference Id - * **`task_type` (Optional, Enum("sparse_embedding" | "text_embedding" | "rerank" | "completion"))**: The task type - * **`inference_config` (Optional, { service, service_settings, task_settings })** +- Contains wildcard expressions and there are no models that match. +- Contains the _all string or no identifiers and there are no matches. +- Contains wildcard expressions and there are only partial matches. +If true, it returns an empty array when there are no matches and the +subset of results when there are partial matches. +- **`from` (Optional, number)**: Skips the specified number of models. +- **`size` (Optional, number)**: Specifies the maximum number of models to obtain. +## client.ml.inferTrainedModel [_ml.infer_trained_model] +Evaluate a trained model. -### stream_inference [_stream_inference] +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-ml-infer-trained-model) -Perform streaming inference. Get real-time responses for completion tasks by delivering answers incrementally, reducing response times during computation. This API works only with the completion task type. +```ts +client.ml.inferTrainedModel({ model_id, docs }) +``` -::::{important} -The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Azure, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face. For built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models. However, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs. -:::: +### Arguments [_arguments_ml.infer_trained_model] +#### Request (object) [_request_ml.infer_trained_model] +- **`model_id` (string)**: The unique identifier of the trained model. +- **`docs` (Record[])**: An array of objects to pass to the model for inference. The objects should contain a fields matching your +configured trained model input. Typically, for NLP models, the field name is `text_field`. +Currently, for NLP models, only a single value is allowed. +- **`inference_config` (Optional, { regression, classification, text_classification, zero_shot_classification, fill_mask, ner, pass_through, text_embedding, text_expansion, question_answering })**: The inference configuration updates to apply on the API call +- **`timeout` (Optional, string | -1 | 0)**: Controls the amount of time to wait for inference results. -This API requires the `monitor_inference` cluster privilege (the built-in `inference_admin` and `inference_user` roles grant this privilege). 
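+A hedged usage sketch (the model ID and input text are placeholders; `text_field` is the typical input field name for NLP models, as noted above):
+
+```ts
+// Sketch only: run a single document through an already-deployed NLP model
+const result = await client.ml.inferTrainedModel({
+  model_id: 'my-nlp-model', // hypothetical model ID
+  docs: [{ text_field: 'Elasticsearch is a distributed search engine' }],
+  timeout: '30s',
+})
+console.log(result.inference_results)
+```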
You must use a client that supports streaming. +## client.ml.info [_ml.info] +Get machine learning information. +Get defaults and limits used by machine learning. +This endpoint is designed to be used by a user interface that needs to fully +understand machine learning configurations where some options are not +specified, meaning that the defaults should be used. This endpoint may be +used to find out what those defaults are. It also provides information about +the maximum size of machine learning jobs that could run in the current +cluster configuration. -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-stream-inference) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-ml-info) ```ts -client.inference.streamInference({ inference_id, input }) +client.ml.info() ``` -### Arguments [_arguments_228] - -* **Request (object):** +## client.ml.openJob [_ml.open_job] +Open anomaly detection jobs. - * **`inference_id` (string)**: The unique identifier for the inference endpoint. - * **`input` (string | string[])**: The text on which you want to perform the inference task. It can be a single string or an array. +An anomaly detection job must be opened to be ready to receive and analyze +data. It can be opened and closed multiple times throughout its lifecycle. +When you open a new job, it starts with an empty model. +When you open an existing job, the most recent model state is automatically +loaded. The job is ready to resume its analysis from where it left off, once +new data is received. +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-ml-open-job) -::::{note} -Inference endpoints for the completion task type currently only support a single string as input. *** *`task_type` (Optional, Enum("sparse_embedding" | "text_embedding" | "rerank" | "completion"))**: The type of task that the model performs. -:::: +```ts +client.ml.openJob({ job_id }) +``` +### Arguments [_arguments_ml.open_job] +#### Request (object) [_request_ml.open_job] +- **`job_id` (string)**: Identifier for the anomaly detection job. +- **`timeout` (Optional, string | -1 | 0)**: Refer to the description for the `timeout` query parameter. -### unified_inference [_unified_inference] +## client.ml.postCalendarEvents [_ml.post_calendar_events] +Add scheduled events to the calendar. -Perform inference on the service using the Unified Schema +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-ml-post-calendar-events) ```ts -client.inference.unifiedInference({ inference_id, messages }) +client.ml.postCalendarEvents({ calendar_id, events }) ``` +### Arguments [_arguments_ml.post_calendar_events] -### Arguments [_arguments_229] - -* **Request (object):** - - * **`inference_id` (string)**: The inference Id - * **`messages` ({ content, role, tool_call_id, tool_calls }[])**: A list of objects representing the conversation. - * **`task_type` (Optional, Enum("sparse_embedding" | "text_embedding" | "rerank" | "completion"))**: The task type - * **`model` (Optional, string)**: The ID of the model to use. - * **`max_completion_tokens` (Optional, number)**: The upper bound limit for the number of tokens that can be generated for a completion request. - * **`stop` (Optional, string[])**: A sequence of strings to control when the model should stop generating additional tokens. - * **`temperature` (Optional, float)**: The sampling temperature to use. 
- * **`tool_choice` (Optional, string | { type, function })**: Controls which tool is called by the model.
- * **`tools` (Optional, { type, function }[])**: A list of tools that the model can call.
- * **`top_p` (Optional, float)**: Nucleus sampling, an alternative to sampling with temperature.
- * **`timeout` (Optional, string | -1 | 0)**: Specifies the amount of time to wait for the inference request to complete.

+#### Request (object) [_request_ml.post_calendar_events]
+- **`calendar_id` (string)**: A string that uniquely identifies a calendar.
+- **`events` ({ calendar_id, event_id, description, end_time, start_time, skip_result, skip_model_update, force_time_shift }[])**: A list of one or more scheduled events. The event’s start and end times can be specified as integer milliseconds since the epoch or as a string in ISO 8601 format.

+## client.ml.postData [_ml.post_data]
+Send data to an anomaly detection job for analysis.
+IMPORTANT: For each job, data can be accepted from only a single connection at a time.
+It is not currently possible to post data to multiple jobs using wildcards or a list.

-### update [_update_2]

+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-ml-post-data)

-Update an inference endpoint.

+```ts
+client.ml.postData({ job_id })
+```

-Modify `task_settings`, secrets (within `service_settings`), or `num_allocations` for an inference endpoint, depending on the specific endpoint service and `task_type`.

+### Arguments [_arguments_ml.post_data]

-::::{important}
-The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Azure, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face. For built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models. However, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs.
-::::

+#### Request (object) [_request_ml.post_data]
+- **`job_id` (string)**: Identifier for the anomaly detection job. The job must have a state of open to receive and process the data.
+- **`data` (Optional, TData[])**
+- **`reset_end` (Optional, string | Unit)**: Specifies the end of the bucket resetting range.
+- **`reset_start` (Optional, string | Unit)**: Specifies the start of the bucket resetting range.

+## client.ml.previewDataFrameAnalytics [_ml.preview_data_frame_analytics]
+Preview features used by data frame analytics.
+Preview the extracted features used by a data frame analytics config.

-[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-update)

+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-ml-preview-data-frame-analytics)

```ts
-client.inference.update({ inference_id })
+client.ml.previewDataFrameAnalytics({ ... })
```

+### Arguments [_arguments_ml.preview_data_frame_analytics]

-### Arguments [_arguments_230]

-* **Request (object):**

+#### Request (object) [_request_ml.preview_data_frame_analytics]
+- **`id` (Optional, string)**: Identifier for the data frame analytics job.
+- **`config` (Optional, { source, analysis, model_memory_limit, max_num_threads, analyzed_fields })**: A data frame analytics config as described in create data frame analytics
+jobs.
Note that `id` and `dest` don’t need to be provided in the context of +this API. - * **`inference_id` (string)**: The unique identifier of the inference endpoint. - * **`task_type` (Optional, Enum("sparse_embedding" | "text_embedding" | "rerank" | "completion"))**: The type of inference task that the model performs. - * **`inference_config` (Optional, { service, service_settings, task_settings })** +## client.ml.previewDatafeed [_ml.preview_datafeed] +Preview a datafeed. +This API returns the first "page" of search results from a datafeed. +You can preview an existing datafeed or provide configuration details for a datafeed +and anomaly detection job in the API. The preview shows the structure of the data +that will be passed to the anomaly detection engine. +IMPORTANT: When Elasticsearch security features are enabled, the preview uses the credentials of the user that +called the API. However, when the datafeed starts it uses the roles of the last user that created or updated the +datafeed. To get a preview that accurately reflects the behavior of the datafeed, use the appropriate credentials. +You can also use secondary authorization headers to supply the credentials. +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-ml-preview-datafeed) +```ts +client.ml.previewDatafeed({ ... }) +``` -## ingest [_ingest] - +### Arguments [_arguments_ml.preview_datafeed] -### delete_geoip_database [_delete_geoip_database] +#### Request (object) [_request_ml.preview_datafeed] +- **`datafeed_id` (Optional, string)**: A numerical character string that uniquely identifies the datafeed. This identifier can contain lowercase +alphanumeric characters (a-z and 0-9), hyphens, and underscores. It must start and end with alphanumeric +characters. NOTE: If you use this path parameter, you cannot provide datafeed or anomaly detection job +configuration details in the request body. +- **`datafeed_config` (Optional, { aggregations, chunking_config, datafeed_id, delayed_data_check_config, frequency, indices, indices_options, job_id, max_empty_searches, query, query_delay, runtime_mappings, script_fields, scroll_size })**: The datafeed definition to preview. +- **`job_config` (Optional, { allow_lazy_open, analysis_config, analysis_limits, background_persist_interval, custom_settings, daily_model_snapshot_retention_after_days, data_description, datafeed_config, description, groups, job_id, job_type, model_plot_config, model_snapshot_retention_days, renormalization_window_days, results_index_name, results_retention_days })**: The configuration details for the anomaly detection job that is associated with the datafeed. If the +`datafeed_config` object does not include a `job_id` that references an existing anomaly detection job, you must +supply this `job_config` object. If you include both a `job_id` and a `job_config`, the latter information is +used. You cannot specify a `job_config` object unless you also supply a `datafeed_config` object. +- **`start` (Optional, string | Unit)**: The start time from where the datafeed preview should begin +- **`end` (Optional, string | Unit)**: The end time when the datafeed preview should stop -Delete GeoIP database configurations. Delete one or more IP geolocation database configurations. +## client.ml.putCalendar [_ml.put_calendar] +Create a calendar. 
-[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ingest-delete-ip-location-database) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-ml-put-calendar) ```ts -client.ingest.deleteGeoipDatabase({ id }) +client.ml.putCalendar({ calendar_id }) ``` +### Arguments [_arguments_ml.put_calendar] -### Arguments [_arguments_231] +#### Request (object) [_request_ml.put_calendar] +- **`calendar_id` (string)**: A string that uniquely identifies a calendar. +- **`job_ids` (Optional, string[])**: An array of anomaly detection job identifiers. +- **`description` (Optional, string)**: A description of the calendar. -* **Request (object):** +## client.ml.putCalendarJob [_ml.put_calendar_job] +Add anomaly detection job to calendar. - * **`id` (string | string[])**: A list of geoip database configurations to delete - * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. - * **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-ml-put-calendar-job) +```ts +client.ml.putCalendarJob({ calendar_id, job_id }) +``` +### Arguments [_arguments_ml.put_calendar_job] -### delete_ip_location_database [_delete_ip_location_database] +#### Request (object) [_request_ml.put_calendar_job] +- **`calendar_id` (string)**: A string that uniquely identifies a calendar. +- **`job_id` (string | string[])**: An identifier for the anomaly detection jobs. It can be a job identifier, a group name, or a list of jobs or groups. -Delete IP geolocation database configurations. +## client.ml.putDataFrameAnalytics [_ml.put_data_frame_analytics] +Create a data frame analytics job. +This API creates a data frame analytics job that performs an analysis on the +source indices and stores the outcome in a destination index. +By default, the query used in the source configuration is `{"match_all": {}}`. -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ingest-delete-ip-location-database) +If the destination index does not exist, it is created automatically when you start the job. + +If you supply only a subset of the regression or classification parameters, hyperparameter optimization occurs. It determines a value for each of the undefined parameters. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-ml-put-data-frame-analytics) ```ts -client.ingest.deleteIpLocationDatabase({ id }) +client.ml.putDataFrameAnalytics({ id, analysis, dest, source }) ``` +### Arguments [_arguments_ml.put_data_frame_analytics] + +#### Request (object) [_request_ml.put_data_frame_analytics] +- **`id` (string)**: Identifier for the data frame analytics job. This identifier can contain +lowercase alphanumeric characters (a-z and 0-9), hyphens, and +underscores. It must start and end with alphanumeric characters. +- **`analysis` ({ classification, outlier_detection, regression })**: The analysis configuration, which contains the information necessary to +perform one of the following types of analysis: classification, outlier +detection, or regression. +- **`dest` ({ index, results_field })**: The destination configuration. 
+- **`source` ({ index, query, runtime_mappings, _source })**: The configuration of how to source the analysis data. +- **`allow_lazy_start` (Optional, boolean)**: Specifies whether this job can start when there is insufficient machine +learning node capacity for it to be immediately assigned to a node. If +set to `false` and a machine learning node with capacity to run the job +cannot be immediately found, the API returns an error. If set to `true`, +the API does not return an error; the job waits in the `starting` state +until sufficient machine learning node capacity is available. This +behavior is also affected by the cluster-wide +`xpack.ml.max_lazy_ml_nodes` setting. +- **`analyzed_fields` (Optional, { includes, excludes })**: Specifies `includes` and/or `excludes` patterns to select which fields +will be included in the analysis. The patterns specified in `excludes` +are applied last, therefore `excludes` takes precedence. In other words, +if the same field is specified in both `includes` and `excludes`, then +the field will not be included in the analysis. If `analyzed_fields` is +not set, only the relevant fields will be included. For example, all the +numeric fields for outlier detection. +The supported fields vary for each type of analysis. Outlier detection +requires numeric or `boolean` data to analyze. The algorithms don’t +support missing values therefore fields that have data types other than +numeric or boolean are ignored. Documents where included fields contain +missing values, null values, or an array are also ignored. Therefore the +`dest` index may contain documents that don’t have an outlier score. +Regression supports fields that are numeric, `boolean`, `text`, +`keyword`, and `ip` data types. It is also tolerant of missing values. +Fields that are supported are included in the analysis, other fields are +ignored. Documents where included fields contain an array with two or +more values are also ignored. Documents in the `dest` index that don’t +contain a results field are not included in the regression analysis. +Classification supports fields that are numeric, `boolean`, `text`, +`keyword`, and `ip` data types. It is also tolerant of missing values. +Fields that are supported are included in the analysis, other fields are +ignored. Documents where included fields contain an array with two or +more values are also ignored. Documents in the `dest` index that don’t +contain a results field are not included in the classification analysis. +Classification analysis can be improved by mapping ordinal variable +values to a single number. For example, in case of age ranges, you can +model the values as `0-14 = 0`, `15-24 = 1`, `25-34 = 2`, and so on. +- **`description` (Optional, string)**: A description of the job. +- **`max_num_threads` (Optional, number)**: The maximum number of threads to be used by the analysis. Using more +threads may decrease the time necessary to complete the analysis at the +cost of using more CPU. Note that the process may use additional threads +for operational functionality other than the analysis itself. +- **`_meta` (Optional, Record)** +- **`model_memory_limit` (Optional, string)**: The approximate maximum amount of memory resources that are permitted for +analytical processing. If your `elasticsearch.yml` file contains an +`xpack.ml.max_model_memory_limit` setting, an error occurs when you try +to create data frame analytics jobs that have `model_memory_limit` values +greater than that setting. 
+- **`headers` (Optional, Record)** +- **`version` (Optional, string)** + +## client.ml.putDatafeed [_ml.put_datafeed] +Create a datafeed. +Datafeeds retrieve data from Elasticsearch for analysis by an anomaly detection job. +You can associate only one datafeed with each anomaly detection job. +The datafeed contains a query that runs at a defined interval (`frequency`). +If you are concerned about delayed data, you can add a delay (`query_delay') at each interval. +By default, the datafeed uses the following query: `{"match_all": {"boost": 1}}`. + +When Elasticsearch security features are enabled, your datafeed remembers which roles the user who created it had +at the time of creation and runs the query using those same roles. If you provide secondary authorization headers, +those credentials are used instead. +You must use Kibana, this API, or the create anomaly detection jobs API to create a datafeed. Do not add a datafeed +directly to the `.ml-config` index. Do not give users `write` privileges on the `.ml-config` index. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-ml-put-datafeed) -### Arguments [_arguments_232] +```ts +client.ml.putDatafeed({ datafeed_id }) +``` -* **Request (object):** +### Arguments [_arguments_ml.put_datafeed] + +#### Request (object) [_request_ml.put_datafeed] +- **`datafeed_id` (string)**: A numerical character string that uniquely identifies the datafeed. +This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. +It must start and end with alphanumeric characters. +- **`aggregations` (Optional, Record)**: If set, the datafeed performs aggregation searches. +Support for aggregations is limited and should be used only with low cardinality data. +- **`chunking_config` (Optional, { mode, time_span })**: Datafeeds might be required to search over long time periods, for several months or years. +This search is split into time chunks in order to ensure the load on Elasticsearch is managed. +Chunking configuration controls how the size of these time chunks are calculated; +it is an advanced configuration option. +- **`delayed_data_check_config` (Optional, { check_window, enabled })**: Specifies whether the datafeed checks for missing data and the size of the window. +The datafeed can optionally search over indices that have already been read in an effort to determine whether +any data has subsequently been added to the index. If missing data is found, it is a good indication that the +`query_delay` is set too low and the data is being indexed after the datafeed has passed that moment in time. +This check runs only on real-time datafeeds. +- **`frequency` (Optional, string | -1 | 0)**: The interval at which scheduled queries are made while the datafeed runs in real time. +The default value is either the bucket span for short bucket spans, or, for longer bucket spans, a sensible +fraction of the bucket span. When `frequency` is shorter than the bucket span, interim results for the last +(partial) bucket are written then eventually overwritten by the full bucket results. If the datafeed uses +aggregations, this value must be divisible by the interval of the date histogram aggregation. +- **`indices` (Optional, string | string[])**: An array of index names. Wildcards are supported. If any of the indices are in remote clusters, the master +nodes and the machine learning nodes must have the `remote_cluster_client` role. 
+- **`indices_options` (Optional, { allow_no_indices, expand_wildcards, ignore_unavailable, ignore_throttled })**: Specifies index expansion options that are used during search +- **`job_id` (Optional, string)**: Identifier for the anomaly detection job. +- **`max_empty_searches` (Optional, number)**: If a real-time datafeed has never seen any data (including during any initial training period), it automatically +stops and closes the associated job after this many real-time searches return no documents. In other words, +it stops after `frequency` times `max_empty_searches` of real-time operation. If not set, a datafeed with no +end time that sees no data remains started until it is explicitly stopped. By default, it is not set. +- **`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })**: The Elasticsearch query domain-specific language (DSL). This value corresponds to the query object in an +Elasticsearch search POST body. All the options that are supported by Elasticsearch can be used, as this +object is passed verbatim to Elasticsearch. +- **`query_delay` (Optional, string | -1 | 0)**: The number of seconds behind real time that data is queried. For example, if data from 10:04 a.m. might +not be searchable in Elasticsearch until 10:06 a.m., set this property to 120 seconds. The default +value is randomly selected between `60s` and `120s`. This randomness improves the query performance +when there are multiple jobs running on the same node. +- **`runtime_mappings` (Optional, Record)**: Specifies runtime fields for the datafeed search. +- **`script_fields` (Optional, Record)**: Specifies scripts that evaluate custom expressions and returns script fields to the datafeed. +The detector configuration objects in a job can contain functions that use these script fields. +- **`scroll_size` (Optional, number)**: The size parameter that is used in Elasticsearch searches when the datafeed does not use aggregations. +The maximum value is the value of `index.max_result_window`, which is 10,000 by default. +- **`headers` (Optional, Record)** +- **`allow_no_indices` (Optional, boolean)**: If true, wildcard indices expressions that resolve into no concrete indices are ignored. This includes the `_all` +string or when no indices are specified. +- **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines +whether wildcard expressions match hidden data streams. Supports a list of values. +- **`ignore_throttled` (Optional, boolean)**: If true, concrete, expanded, or aliased indices are ignored when frozen. +- **`ignore_unavailable` (Optional, boolean)**: If true, unavailable indices (missing or closed) are ignored. 
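
To make the datafeed parameters above concrete, here is a minimal sketch of creating a datafeed with `client.ml.putDatafeed`. It assumes a configured `Client` instance and an already created anomaly detection job; the index pattern, identifiers, and interval values are illustrative assumptions, not part of the generated reference.

```ts
import { Client } from '@elastic/elasticsearch'

// Assumed connection details; adjust for your cluster.
const client = new Client({ node: 'http://localhost:9200/' })

// Hypothetical datafeed for a job named 'web-logs-anomalies'.
await client.ml.putDatafeed({
  datafeed_id: 'datafeed-web-logs-anomalies', // unique datafeed identifier
  job_id: 'web-logs-anomalies',               // the anomaly detection job to feed
  indices: ['web-logs-*'],                    // source indices; wildcards are supported
  query: { match_all: { boost: 1 } },         // the default query described above
  frequency: '150s',                          // how often the scheduled search runs
  query_delay: '90s',                         // lag behind real time to allow for indexing delay
  scroll_size: 1000                           // search size when no aggregations are used
})
```

Note that the client accepts the path parameter (`datafeed_id`) and the body fields (such as `indices` and `query`) in the same flat request object, matching the signatures shown throughout this reference.
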
+ +## client.ml.putFilter [_ml.put_filter] +Create a filter. +A filter contains a list of strings. It can be used by one or more anomaly detection jobs. +Specifically, filters are referenced in the `custom_rules` property of detector configuration objects. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-ml-put-filter) - * **`id` (string | string[])**: A list of IP location database configurations. - * **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. A value of `-1` indicates that the request should never time out. - * **`timeout` (Optional, string | -1 | 0)**: The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. A value of `-1` indicates that the request should never time out. +```ts +client.ml.putFilter({ filter_id }) +``` +### Arguments [_arguments_ml.put_filter] +#### Request (object) [_request_ml.put_filter] +- **`filter_id` (string)**: A string that uniquely identifies a filter. +- **`description` (Optional, string)**: A description of the filter. +- **`items` (Optional, string[])**: The items of the filter. A wildcard `*` can be used at the beginning or the end of an item. +Up to 10000 items are allowed in each filter. -### delete_pipeline [_delete_pipeline] +## client.ml.putJob [_ml.put_job] +Create an anomaly detection job. -Delete pipelines. Delete one or more ingest pipelines. +If you include a `datafeed_config`, you must have read index privileges on the source index. +If you include a `datafeed_config` but do not provide a query, the datafeed uses `{"match_all": {"boost": 1}}`. -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ingest-delete-pipeline) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-ml-put-job) ```ts -client.ingest.deletePipeline({ id }) +client.ml.putJob({ job_id, analysis_config, data_description }) ``` +### Arguments [_arguments_ml.put_job] + +#### Request (object) [_request_ml.put_job] +- **`job_id` (string)**: The identifier for the anomaly detection job. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. It must start and end with alphanumeric characters. +- **`analysis_config` ({ bucket_span, categorization_analyzer, categorization_field_name, categorization_filters, detectors, influencers, latency, model_prune_window, multivariate_by_fields, per_partition_categorization, summary_count_field_name })**: Specifies how to analyze the data. After you create a job, you cannot change the analysis configuration; all the properties are informational. +- **`data_description` ({ format, time_field, time_format, field_delimiter })**: Defines the format of the input data when you send data to the job by using the post data API. Note that when configure a datafeed, these properties are automatically set. When data is received via the post data API, it is not stored in Elasticsearch. Only the results for anomaly detection are retained. +- **`allow_lazy_open` (Optional, boolean)**: Advanced configuration option. Specifies whether this job can open when there is insufficient machine learning node capacity for it to be immediately assigned to a node. 
By default, if a machine learning node with capacity to run the job cannot immediately be found, the open anomaly detection jobs API returns an error. However, this is also subject to the cluster-wide `xpack.ml.max_lazy_ml_nodes` setting. If this option is set to true, the open anomaly detection jobs API does not return an error and the job waits in the opening state until sufficient machine learning node capacity is available. +- **`analysis_limits` (Optional, { categorization_examples_limit, model_memory_limit })**: Limits can be applied for the resources required to hold the mathematical models in memory. These limits are approximate and can be set per job. They do not control the memory used by other processes, for example the Elasticsearch Java processes. +- **`background_persist_interval` (Optional, string | -1 | 0)**: Advanced configuration option. The time between each periodic persistence of the model. The default value is a randomized value between 3 to 4 hours, which avoids all jobs persisting at exactly the same time. The smallest allowed value is 1 hour. For very large models (several GB), persistence could take 10-20 minutes, so do not set the `background_persist_interval` value too low. +- **`custom_settings` (Optional, User-defined value)**: Advanced configuration option. Contains custom meta data about the job. +- **`daily_model_snapshot_retention_after_days` (Optional, number)**: Advanced configuration option, which affects the automatic removal of old model snapshots for this job. It specifies a period of time (in days) after which only the first snapshot per day is retained. This period is relative to the timestamp of the most recent snapshot for this job. Valid values range from 0 to `model_snapshot_retention_days`. +- **`datafeed_config` (Optional, { aggregations, chunking_config, datafeed_id, delayed_data_check_config, frequency, indices, indices_options, job_id, max_empty_searches, query, query_delay, runtime_mappings, script_fields, scroll_size })**: Defines a datafeed for the anomaly detection job. If Elasticsearch security features are enabled, your datafeed remembers which roles the user who created it had at the time of creation and runs the query using those same roles. If you provide secondary authorization headers, those credentials are used instead. +- **`description` (Optional, string)**: A description of the job. +- **`groups` (Optional, string[])**: A list of job groups. A job can belong to no groups or many. +- **`model_plot_config` (Optional, { annotations_enabled, enabled, terms })**: This advanced configuration option stores model information along with the results. It provides a more detailed view into anomaly detection. If you enable model plot it can add considerable overhead to the performance of the system; it is not feasible for jobs with many entities. Model plot provides a simplified and indicative view of the model and its bounds. It does not display complex features such as multivariate correlations or multimodal data. As such, anomalies may occasionally be reported which cannot be seen in the model plot. Model plot config can be configured when the job is created or updated later. It must be disabled if performance issues are experienced. +- **`model_snapshot_retention_days` (Optional, number)**: Advanced configuration option, which affects the automatic removal of old model snapshots for this job. It specifies the maximum period of time (in days) that snapshots are retained. 
This period is relative to the timestamp of the most recent snapshot for this job. By default, snapshots ten days older than the newest snapshot are deleted. +- **`renormalization_window_days` (Optional, number)**: Advanced configuration option. The period over which adjustments to the score are applied, as new data is seen. The default value is the longer of 30 days or 100 bucket spans. +- **`results_index_name` (Optional, string)**: A text string that affects the name of the machine learning results index. By default, the job generates an index named `.ml-anomalies-shared`. +- **`results_retention_days` (Optional, number)**: Advanced configuration option. The period of time (in days) that results are retained. Age is calculated relative to the timestamp of the latest bucket result. If this property has a non-null value, once per day at 00:30 (server time), results that are the specified number of days older than the latest bucket result are deleted from Elasticsearch. The default value is null, which means all results are retained. Annotations generated by the system also count as results for retention purposes; they are deleted after the same number of days as results. Annotations added by users are retained forever. +- **`allow_no_indices` (Optional, boolean)**: If `true`, wildcard indices expressions that resolve into no concrete indices are ignored. This includes the +`_all` string or when no indices are specified. +- **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines +whether wildcard expressions match hidden data streams. Supports a list of values. Valid values are: + +* `all`: Match any data stream or index, including hidden ones. +* `closed`: Match closed, non-hidden indices. Also matches any non-hidden data stream. Data streams cannot be closed. +* `hidden`: Match hidden data streams and hidden indices. Must be combined with `open`, `closed`, or both. +* `none`: Wildcard patterns are not accepted. +* `open`: Match open, non-hidden indices. Also matches any non-hidden data stream. +- **`ignore_throttled` (Optional, boolean)**: If `true`, concrete, expanded or aliased indices are ignored when frozen. +- **`ignore_unavailable` (Optional, boolean)**: If `true`, unavailable indices (missing or closed) are ignored. + +## client.ml.putTrainedModel [_ml.put_trained_model] +Create a trained model. +Enable you to supply a trained model that is not created by data frame analytics. -### Arguments [_arguments_233] +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-ml-put-trained-model) -* **Request (object):** +```ts +client.ml.putTrainedModel({ model_id }) +``` - * **`id` (string)**: Pipeline ID or wildcard expression of pipeline IDs used to limit the request. To delete all ingest pipelines in a cluster, use a value of `*`. - * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. - * **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. 
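
Pulling together the `client.ml.putJob` options documented above, here is a minimal, hedged sketch of creating an anomaly detection job. The job identifier, detector, and field names are assumptions for illustration only, and the snippet assumes the same configured `client` instance as in the earlier sketch.

```ts
// Hypothetical job: count events per 15-minute bucket in web log data.
await client.ml.putJob({
  job_id: 'web-logs-anomalies',
  analysis_config: {
    bucket_span: '15m',                                        // size of the analysis buckets
    detectors: [
      { function: 'count', detector_description: 'Event rate' }
    ]
  },
  data_description: {
    time_field: '@timestamp'                                   // timestamp field in the source data
  },
  results_index_name: 'web-logs'                               // optional: affects the results index name
})

// The job starts with an empty model; open it before sending or analyzing data.
await client.ml.openJob({ job_id: 'web-logs-anomalies' })
```
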
+### Arguments [_arguments_ml.put_trained_model] + +#### Request (object) [_request_ml.put_trained_model] +- **`model_id` (string)**: The unique identifier of the trained model. +- **`compressed_definition` (Optional, string)**: The compressed (GZipped and Base64 encoded) inference definition of the +model. If compressed_definition is specified, then definition cannot be +specified. +- **`definition` (Optional, { preprocessors, trained_model })**: The inference definition for the model. If definition is specified, then +compressed_definition cannot be specified. +- **`description` (Optional, string)**: A human-readable description of the inference trained model. +- **`inference_config` (Optional, { regression, classification, text_classification, zero_shot_classification, fill_mask, learning_to_rank, ner, pass_through, text_embedding, text_expansion, question_answering })**: The default configuration for inference. This can be either a regression +or classification configuration. It must match the underlying +definition.trained_model's target_type. For pre-packaged models such as +ELSER the config is not required. +- **`input` (Optional, { field_names })**: The input field names for the model definition. +- **`metadata` (Optional, User-defined value)**: An object map that contains metadata about the model. +- **`model_type` (Optional, Enum("tree_ensemble" | "lang_ident" | "pytorch"))**: The model type. +- **`model_size_bytes` (Optional, number)**: The estimated memory usage in bytes to keep the trained model in memory. +This property is supported only if defer_definition_decompression is true +or the model definition is not supplied. +- **`platform_architecture` (Optional, string)**: The platform architecture (if applicable) of the trained mode. If the model +only works on one platform, because it is heavily optimized for a particular +processor architecture and OS combination, then this field specifies which. +The format of the string must match the platform identifiers used by Elasticsearch, +so one of, `linux-x86_64`, `linux-aarch64`, `darwin-x86_64`, `darwin-aarch64`, +or `windows-x86_64`. For portable models (those that work independent of processor +architecture or OS features), leave this field unset. +- **`tags` (Optional, string[])**: An array of tags to organize the model. +- **`prefix_strings` (Optional, { ingest, search })**: Optional prefix strings applied at inference +- **`defer_definition_decompression` (Optional, boolean)**: If set to `true` and a `compressed_definition` is provided, +the request defers definition decompression and skips relevant +validations. +- **`wait_for_completion` (Optional, boolean)**: Whether to wait for all child operations (e.g. model download) +to complete. + +## client.ml.putTrainedModelAlias [_ml.put_trained_model_alias] +Create or update a trained model alias. +A trained model alias is a logical name used to reference a single trained +model. +You can use aliases instead of trained model identifiers to make it easier to +reference your models. For example, you can use aliases in inference +aggregations and processors. +An alias must be unique and refer to only a single trained model. However, +you can have multiple aliases for each trained model. +If you use this API to update an alias such that it references a different +trained model ID and the model uses a different type of data frame analytics, +an error occurs. 
For example, this situation occurs if you have a trained +model for regression analysis and a trained model for classification +analysis; you cannot reassign an alias from one type of trained model to +another. +If you use this API to update an alias and there are very few input fields in +common between the old and new trained models for the model alias, the API +returns a warning. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-ml-put-trained-model-alias) +```ts +client.ml.putTrainedModelAlias({ model_alias, model_id }) +``` +### Arguments [_arguments_ml.put_trained_model_alias] -### geo_ip_stats [_geo_ip_stats] +#### Request (object) [_request_ml.put_trained_model_alias] +- **`model_alias` (string)**: The alias to create or update. This value cannot end in numbers. +- **`model_id` (string)**: The identifier for the trained model that the alias refers to. +- **`reassign` (Optional, boolean)**: Specifies whether the alias gets reassigned to the specified trained +model if it is already assigned to a different model. If the alias is +already assigned and this parameter is false, the API returns an error. -Get GeoIP statistics. Get download statistics for GeoIP2 databases that are used with the GeoIP processor. +## client.ml.putTrainedModelDefinitionPart [_ml.put_trained_model_definition_part] +Create part of a trained model definition. -[Endpoint documentation](elasticsearch://reference/ingestion-tools/enrich-processor/geoip-processor.md) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-ml-put-trained-model-definition-part) ```ts -client.ingest.geoIpStats() +client.ml.putTrainedModelDefinitionPart({ model_id, part, definition, total_definition_length, total_parts }) ``` +### Arguments [_arguments_ml.put_trained_model_definition_part] -### get_geoip_database [_get_geoip_database] +#### Request (object) [_request_ml.put_trained_model_definition_part] +- **`model_id` (string)**: The unique identifier of the trained model. +- **`part` (number)**: The definition part number. When the definition is loaded for inference the definition parts are streamed in the +order of their part number. The first part must be `0` and the final part must be `total_parts - 1`. +- **`definition` (string)**: The definition part for the model. Must be a base64 encoded string. +- **`total_definition_length` (number)**: The total uncompressed definition length in bytes. Not base64 encoded. +- **`total_parts` (number)**: The total number of parts that will be uploaded. Must be greater than 0. -Get GeoIP database configurations. Get information about one or more IP geolocation database configurations. +## client.ml.putTrainedModelVocabulary [_ml.put_trained_model_vocabulary] +Create a trained model vocabulary. +This API is supported only for natural language processing (NLP) models. +The vocabulary is stored in the index as described in `inference_config.*.vocabulary` of the trained model definition. -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ingest-get-ip-location-database) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-ml-put-trained-model-vocabulary) ```ts -client.ingest.getGeoipDatabase({ ... 
})
+client.ml.putTrainedModelVocabulary({ model_id, vocabulary })
```

+### Arguments [_arguments_ml.put_trained_model_vocabulary]

-### Arguments [_arguments_234]

+#### Request (object) [_request_ml.put_trained_model_vocabulary]
+- **`model_id` (string)**: The unique identifier of the trained model.
+- **`vocabulary` (string[])**: The model vocabulary, which must not be empty.
+- **`merges` (Optional, string[])**: The optional model merges if required by the tokenizer.
+- **`scores` (Optional, number[])**: The optional vocabulary value scores if required by the tokenizer.

-* **Request (object):**

+## client.ml.resetJob [_ml.reset_job]
+Reset an anomaly detection job.
+All model state and results are deleted. The job is ready to start over as if
+it had just been created.
+It is not currently possible to reset multiple jobs using wildcards or a
+comma-separated list.

- * **`id` (Optional, string | string[])**: List of database configuration IDs to retrieve. Wildcard (`*`) expressions are supported. To get all database configurations, omit this parameter or use `*`.

+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-ml-reset-job)

+```ts
+client.ml.resetJob({ job_id })
+```

+### Arguments [_arguments_ml.reset_job]

-### get_ip_location_database [_get_ip_location_database]

+#### Request (object) [_request_ml.reset_job]
+- **`job_id` (string)**: The ID of the job to reset.
+- **`wait_for_completion` (Optional, boolean)**: Should this request wait until the operation has completed before
+returning.
+- **`delete_user_annotations` (Optional, boolean)**: Specifies whether annotations that have been added by the
+user should be deleted along with any auto-generated annotations when the job is
+reset.

-Get IP geolocation database configurations.

+## client.ml.revertModelSnapshot [_ml.revert_model_snapshot]
+Revert to a snapshot.
+The machine learning features react quickly to anomalous input, learning new
+behaviors in data. Highly anomalous input increases the variance in the
+models whilst the system learns whether this is a new step-change in behavior
+or a one-off event. In the case where this anomalous input is known to be a
+one-off, then it might be appropriate to reset the model state to a time
+before this event. For example, you might consider reverting to a saved
+snapshot after Black Friday or a critical system failure.

-[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ingest-get-ip-location-database)

+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-ml-revert-model-snapshot)

```ts
-client.ingest.getIpLocationDatabase({ ... })
+client.ml.revertModelSnapshot({ job_id, snapshot_id })
```

+### Arguments [_arguments_ml.revert_model_snapshot]

-### Arguments [_arguments_235]

+#### Request (object) [_request_ml.revert_model_snapshot]
+- **`job_id` (string)**: Identifier for the anomaly detection job.
+- **`snapshot_id` (string)**: You can specify `empty` as the snapshot ID. Reverting to the empty
+snapshot means the anomaly detection job starts learning a new model from
+scratch when it is started.
+- **`delete_intervening_results` (Optional, boolean)**: Refer to the description for the `delete_intervening_results` query parameter.

-* **Request (object):**

+## client.ml.setUpgradeMode [_ml.set_upgrade_mode]
+Set upgrade_mode for ML indices.
+Sets a cluster-wide upgrade_mode setting that prepares machine learning
+indices for an upgrade.
+When upgrading your cluster, in some circumstances you must restart your +nodes and reindex your machine learning indices. In those circumstances, +there must be no machine learning jobs running. You can close the machine +learning jobs, do the upgrade, then open all the jobs again. Alternatively, +you can use this API to temporarily halt tasks associated with the jobs and +datafeeds and prevent new jobs from opening. You can also use this API +during upgrades that do not require you to reindex your machine learning +indices, though stopping jobs is not a requirement in that case. +You can see the current value for the upgrade_mode setting by using the get +machine learning info API. - * **`id` (Optional, string | string[])**: List of database configuration IDs to retrieve. Wildcard (`*`) expressions are supported. To get all database configurations, omit this parameter or use `*`. - * **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. A value of `-1` indicates that the request should never time out. +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-ml-set-upgrade-mode) +```ts +client.ml.setUpgradeMode({ ... }) +``` +### Arguments [_arguments_ml.set_upgrade_mode] -### get_pipeline [_get_pipeline] +#### Request (object) [_request_ml.set_upgrade_mode] +- **`enabled` (Optional, boolean)**: When `true`, it enables `upgrade_mode` which temporarily halts all job +and datafeed tasks and prohibits new job and datafeed tasks from +starting. +- **`timeout` (Optional, string | -1 | 0)**: The time to wait for the request to be completed. -Get pipelines. Get information about one or more ingest pipelines. This API returns a local reference of the pipeline. +## client.ml.startDataFrameAnalytics [_ml.start_data_frame_analytics] +Start a data frame analytics job. +A data frame analytics job can be started and stopped multiple times +throughout its lifecycle. +If the destination index does not exist, it is created automatically the +first time you start the data frame analytics job. The +`index.number_of_shards` and `index.number_of_replicas` settings for the +destination index are copied from the source index. If there are multiple +source indices, the destination index copies the highest setting values. The +mappings for the destination index are also copied from the source indices. +If there are any mapping conflicts, the job fails to start. +If the destination index exists, it is used as is. You can therefore set up +the destination index in advance with custom settings and mappings. -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ingest-get-pipeline) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-ml-start-data-frame-analytics) ```ts -client.ingest.getPipeline({ ... }) +client.ml.startDataFrameAnalytics({ id }) ``` +### Arguments [_arguments_ml.start_data_frame_analytics] -### Arguments [_arguments_236] - -* **Request (object):** +#### Request (object) [_request_ml.start_data_frame_analytics] +- **`id` (string)**: Identifier for the data frame analytics job. This identifier can contain +lowercase alphanumeric characters (a-z and 0-9), hyphens, and +underscores. It must start and end with alphanumeric characters. 
+- **`timeout` (Optional, string | -1 | 0)**: Controls the amount of time to wait until the data frame analytics job +starts. - * **`id` (Optional, string)**: List of pipeline IDs to retrieve. Wildcard (`*`) expressions are supported. To get all ingest pipelines, omit this parameter or use `*`. - * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. - * **`summary` (Optional, boolean)**: Return pipelines without their definitions (default: false) +## client.ml.startDatafeed [_ml.start_datafeed] +Start datafeeds. +A datafeed must be started in order to retrieve data from Elasticsearch. A datafeed can be started and stopped +multiple times throughout its lifecycle. +Before you can start a datafeed, the anomaly detection job must be open. Otherwise, an error occurs. -### processor_grok [_processor_grok] +If you restart a stopped datafeed, it continues processing input data from the next millisecond after it was stopped. +If new data was indexed for that exact millisecond between stopping and starting, it will be ignored. -Run a grok processor. Extract structured fields out of a single text field within a document. You must choose which field to extract matched fields from, as well as the grok pattern you expect will match. A grok pattern is like a regular expression that supports aliased expressions that can be reused. +When Elasticsearch security features are enabled, your datafeed remembers which roles the last user to create or +update it had at the time of creation or update and runs the query using those same roles. If you provided secondary +authorization headers when you created or updated the datafeed, those credentials are used instead. -[Endpoint documentation](elasticsearch://reference/ingestion-tools/enrich-processor/grok-processor.md) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-ml-start-datafeed) ```ts -client.ingest.processorGrok() +client.ml.startDatafeed({ datafeed_id }) ``` +### Arguments [_arguments_ml.start_datafeed] -### put_geoip_database [_put_geoip_database] +#### Request (object) [_request_ml.start_datafeed] +- **`datafeed_id` (string)**: A numerical character string that uniquely identifies the datafeed. This identifier can contain lowercase +alphanumeric characters (a-z and 0-9), hyphens, and underscores. It must start and end with alphanumeric +characters. +- **`end` (Optional, string | Unit)**: Refer to the description for the `end` query parameter. +- **`start` (Optional, string | Unit)**: Refer to the description for the `start` query parameter. +- **`timeout` (Optional, string | -1 | 0)**: Refer to the description for the `timeout` query parameter. -Create or update a GeoIP database configuration. Refer to the create or update IP geolocation database configuration API. +## client.ml.startTrainedModelDeployment [_ml.start_trained_model_deployment] +Start a trained model deployment. +It allocates the model to every machine learning node. 
-[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ingest-put-ip-location-database) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-ml-start-trained-model-deployment) ```ts -client.ingest.putGeoipDatabase({ id, name, maxmind }) +client.ml.startTrainedModelDeployment({ model_id }) ``` +### Arguments [_arguments_ml.start_trained_model_deployment] + +#### Request (object) [_request_ml.start_trained_model_deployment] +- **`model_id` (string)**: The unique identifier of the trained model. Currently, only PyTorch models are supported. +- **`adaptive_allocations` (Optional, { enabled, min_number_of_allocations, max_number_of_allocations })**: Adaptive allocations configuration. When enabled, the number of allocations +is set based on the current load. +If adaptive_allocations is enabled, do not set the number of allocations manually. +- **`cache_size` (Optional, number | string)**: The inference cache size (in memory outside the JVM heap) per node for the model. +The default value is the same size as the `model_size_bytes`. To disable the cache, +`0b` can be provided. +- **`deployment_id` (Optional, string)**: A unique identifier for the deployment of the model. +- **`number_of_allocations` (Optional, number)**: The number of model allocations on each node where the model is deployed. +All allocations on a node share the same copy of the model in memory but use +a separate set of threads to evaluate the model. +Increasing this value generally increases the throughput. +If this setting is greater than the number of hardware threads +it will automatically be changed to a value less than the number of hardware threads. +If adaptive_allocations is enabled, do not set this value, because it’s automatically set. +- **`priority` (Optional, Enum("normal" | "low"))**: The deployment priority. +- **`queue_capacity` (Optional, number)**: Specifies the number of inference requests that are allowed in the queue. After the number of requests exceeds +this value, new requests are rejected with a 429 error. +- **`threads_per_allocation` (Optional, number)**: Sets the number of threads used by each model allocation during inference. This generally increases +the inference speed. The inference process is a compute-bound process; any number +greater than the number of available hardware threads on the machine does not increase the +inference speed. If this setting is greater than the number of hardware threads +it will automatically be changed to a value less than the number of hardware threads. +- **`timeout` (Optional, string | -1 | 0)**: Specifies the amount of time to wait for the model to deploy. +- **`wait_for` (Optional, Enum("started" | "starting" | "fully_allocated"))**: Specifies the allocation status to wait for before returning. + +## client.ml.stopDataFrameAnalytics [_ml.stop_data_frame_analytics] +Stop data frame analytics jobs. +A data frame analytics job can be started and stopped multiple times +throughout its lifecycle. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-ml-stop-data-frame-analytics) -### Arguments [_arguments_237] - -* **Request (object):** +```ts +client.ml.stopDataFrameAnalytics({ id }) +``` - * **`id` (string)**: ID of the database configuration to create or update. - * **`name` (string)**: The provider-assigned name of the IP geolocation database to download. 
- * **`maxmind` ({ account_id })**: The configuration necessary to identify which IP geolocation provider to use to download the database, as well as any provider-specific configuration necessary for such downloading. At present, the only supported provider is maxmind, and the maxmind provider requires that an account_id (string) is configured. - * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. - * **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. +### Arguments [_arguments_ml.stop_data_frame_analytics] +#### Request (object) [_request_ml.stop_data_frame_analytics] +- **`id` (string)**: Identifier for the data frame analytics job. This identifier can contain +lowercase alphanumeric characters (a-z and 0-9), hyphens, and +underscores. It must start and end with alphanumeric characters. +- **`allow_no_match` (Optional, boolean)**: Specifies what to do when the request: +1. Contains wildcard expressions and there are no data frame analytics +jobs that match. +2. Contains the _all string or no identifiers and there are no matches. +3. Contains wildcard expressions and there are only partial matches. -### put_ip_location_database [_put_ip_location_database] +The default value is true, which returns an empty data_frame_analytics +array when there are no matches and the subset of results when there are +partial matches. If this parameter is false, the request returns a 404 +status code when there are no matches or only partial matches. +- **`force` (Optional, boolean)**: If true, the data frame analytics job is stopped forcefully. +- **`timeout` (Optional, string | -1 | 0)**: Controls the amount of time to wait until the data frame analytics job +stops. Defaults to 20 seconds. -Create or update an IP geolocation database configuration. +## client.ml.stopDatafeed [_ml.stop_datafeed] +Stop datafeeds. +A datafeed that is stopped ceases to retrieve data from Elasticsearch. A datafeed can be started and stopped +multiple times throughout its lifecycle. -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ingest-put-ip-location-database) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-ml-stop-datafeed) ```ts -client.ingest.putIpLocationDatabase({ id }) +client.ml.stopDatafeed({ datafeed_id }) ``` +### Arguments [_arguments_ml.stop_datafeed] -### Arguments [_arguments_238] +#### Request (object) [_request_ml.stop_datafeed] +- **`datafeed_id` (string)**: Identifier for the datafeed. You can stop multiple datafeeds in a single API request by using a comma-separated +list of datafeeds or a wildcard expression. You can close all datafeeds by using `_all` or by specifying `*` as +the identifier. +- **`allow_no_match` (Optional, boolean)**: Refer to the description for the `allow_no_match` query parameter. +- **`force` (Optional, boolean)**: Refer to the description for the `force` query parameter. +- **`timeout` (Optional, string | -1 | 0)**: Refer to the description for the `timeout` query parameter. -* **Request (object):** +## client.ml.stopTrainedModelDeployment [_ml.stop_trained_model_deployment] +Stop a trained model deployment. - * **`id` (string)**: The database configuration identifier. 
- * **`configuration` (Optional, { name, maxmind, ipinfo })** - * **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. A value of `-1` indicates that the request should never time out. - * **`timeout` (Optional, string | -1 | 0)**: The period to wait for a response from all relevant nodes in the cluster after updating the cluster metadata. If no response is received before the timeout expires, the cluster metadata update still applies but the response indicates that it was not completely acknowledged. A value of `-1` indicates that the request should never time out. +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-ml-stop-trained-model-deployment) +```ts +client.ml.stopTrainedModelDeployment({ model_id }) +``` +### Arguments [_arguments_ml.stop_trained_model_deployment] -### put_pipeline [_put_pipeline] +#### Request (object) [_request_ml.stop_trained_model_deployment] +- **`model_id` (string)**: The unique identifier of the trained model. +- **`allow_no_match` (Optional, boolean)**: Specifies what to do when the request: contains wildcard expressions and there are no deployments that match; +contains the `_all` string or no identifiers and there are no matches; or contains wildcard expressions and +there are only partial matches. By default, it returns an empty array when there are no matches and the subset of results when there are partial matches. +If `false`, the request returns a 404 status code when there are no matches or only partial matches. +- **`force` (Optional, boolean)**: Forcefully stops the deployment, even if it is used by ingest pipelines. You can't use these pipelines until you +restart the model deployment. -Create or update a pipeline. Changes made using this API take effect immediately. +## client.ml.updateDataFrameAnalytics [_ml.update_data_frame_analytics] +Update a data frame analytics job. -[Endpoint documentation](docs-content://manage-data/ingest/transform-enrich/ingest-pipelines.md) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-ml-update-data-frame-analytics) ```ts -client.ingest.putPipeline({ id }) +client.ml.updateDataFrameAnalytics({ id }) ``` +### Arguments [_arguments_ml.update_data_frame_analytics] -### Arguments [_arguments_239] +#### Request (object) [_request_ml.update_data_frame_analytics] +- **`id` (string)**: Identifier for the data frame analytics job. This identifier can contain +lowercase alphanumeric characters (a-z and 0-9), hyphens, and +underscores. It must start and end with alphanumeric characters. +- **`description` (Optional, string)**: A description of the job. +- **`model_memory_limit` (Optional, string)**: The approximate maximum amount of memory resources that are permitted for +analytical processing. If your `elasticsearch.yml` file contains an +`xpack.ml.max_model_memory_limit` setting, an error occurs when you try +to create data frame analytics jobs that have `model_memory_limit` values +greater than that setting. +- **`max_num_threads` (Optional, number)**: The maximum number of threads to be used by the analysis. Using more +threads may decrease the time necessary to complete the analysis at the +cost of using more CPU. Note that the process may use additional threads +for operational functionality other than the analysis itself. 
+- **`allow_lazy_start` (Optional, boolean)**: Specifies whether this job can start when there is insufficient machine +learning node capacity for it to be immediately assigned to a node. -* **Request (object):** +## client.ml.updateDatafeed [_ml.update_datafeed] +Update a datafeed. +You must stop and start the datafeed for the changes to be applied. +When Elasticsearch security features are enabled, your datafeed remembers which roles the user who updated it had at +the time of the update and runs the query using those same roles. If you provide secondary authorization headers, +those credentials are used instead. - * **`id` (string)**: ID of the ingest pipeline to create or update. - * **`_meta` (Optional, Record)**: Optional metadata about the ingest pipeline. May have any contents. This map is not automatically generated by Elasticsearch. - * **`description` (Optional, string)**: Description of the ingest pipeline. - * **`on_failure` (Optional, { append, attachment, bytes, circle, community_id, convert, csv, date, date_index_name, dissect, dot_expander, drop, enrich, fail, fingerprint, foreach, ip_location, geo_grid, geoip, grok, gsub, html_strip, inference, join, json, kv, lowercase, network_direction, pipeline, redact, registered_domain, remove, rename, reroute, script, set, set_security_user, sort, split, terminate, trim, uppercase, urldecode, uri_parts, user_agent }[])**: Processors to run immediately after a processor failure. Each processor supports a processor-level `on_failure` value. If a processor without an `on_failure` value fails, Elasticsearch uses this pipeline-level parameter as a fallback. The processors in this parameter run sequentially in the order specified. Elasticsearch will not attempt to run the pipeline’s remaining processors. - * **`processors` (Optional, { append, attachment, bytes, circle, community_id, convert, csv, date, date_index_name, dissect, dot_expander, drop, enrich, fail, fingerprint, foreach, ip_location, geo_grid, geoip, grok, gsub, html_strip, inference, join, json, kv, lowercase, network_direction, pipeline, redact, registered_domain, remove, rename, reroute, script, set, set_security_user, sort, split, terminate, trim, uppercase, urldecode, uri_parts, user_agent }[])**: Processors used to perform transformations on documents before indexing. Processors run sequentially in the order specified. - * **`version` (Optional, number)**: Version number used by external systems to track ingest pipelines. This parameter is intended for external systems only. Elasticsearch does not use or validate pipeline version numbers. - * **`deprecated` (Optional, boolean)**: Marks this ingest pipeline as deprecated. When a deprecated ingest pipeline is referenced as the default or final pipeline when creating or updating a non-deprecated index template, Elasticsearch will emit a deprecation warning. - * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. - * **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. 
- * **`if_version` (Optional, number)**: Required version for optimistic concurrency control for pipeline updates +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-ml-update-datafeed) +```ts +client.ml.updateDatafeed({ datafeed_id }) +``` +### Arguments [_arguments_ml.update_datafeed] + +#### Request (object) [_request_ml.update_datafeed] +- **`datafeed_id` (string)**: A numerical character string that uniquely identifies the datafeed. +This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. +It must start and end with alphanumeric characters. +- **`aggregations` (Optional, Record)**: If set, the datafeed performs aggregation searches. Support for aggregations is limited and should be used only +with low cardinality data. +- **`chunking_config` (Optional, { mode, time_span })**: Datafeeds might search over long time periods, for several months or years. This search is split into time +chunks in order to ensure the load on Elasticsearch is managed. Chunking configuration controls how the size of +these time chunks are calculated; it is an advanced configuration option. +- **`delayed_data_check_config` (Optional, { check_window, enabled })**: Specifies whether the datafeed checks for missing data and the size of the window. The datafeed can optionally +search over indices that have already been read in an effort to determine whether any data has subsequently been +added to the index. If missing data is found, it is a good indication that the `query_delay` is set too low and +the data is being indexed after the datafeed has passed that moment in time. This check runs only on real-time +datafeeds. +- **`frequency` (Optional, string | -1 | 0)**: The interval at which scheduled queries are made while the datafeed runs in real time. The default value is +either the bucket span for short bucket spans, or, for longer bucket spans, a sensible fraction of the bucket +span. When `frequency` is shorter than the bucket span, interim results for the last (partial) bucket are +written then eventually overwritten by the full bucket results. If the datafeed uses aggregations, this value +must be divisible by the interval of the date histogram aggregation. +- **`indices` (Optional, string[])**: An array of index names. Wildcards are supported. If any of the indices are in remote clusters, the machine +learning nodes must have the `remote_cluster_client` role. +- **`indices_options` (Optional, { allow_no_indices, expand_wildcards, ignore_unavailable, ignore_throttled })**: Specifies index expansion options that are used during search. +- **`job_id` (Optional, string)** +- **`max_empty_searches` (Optional, number)**: If a real-time datafeed has never seen any data (including during any initial training period), it automatically +stops and closes the associated job after this many real-time searches return no documents. In other words, +it stops after `frequency` times `max_empty_searches` of real-time operation. If not set, a datafeed with no +end time that sees no data remains started until it is explicitly stopped. By default, it is not set. 
+- **`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })**: The Elasticsearch query domain-specific language (DSL). This value corresponds to the query object in an +Elasticsearch search POST body. All the options that are supported by Elasticsearch can be used, as this +object is passed verbatim to Elasticsearch. Note that if you change the query, the analyzed data is also +changed. Therefore, the time required to learn might be long and the understandability of the results is +unpredictable. If you want to make significant changes to the source data, it is recommended that you +clone the job and datafeed and make the amendments in the clone. Let both run in parallel and close one +when you are satisfied with the results of the job. +- **`query_delay` (Optional, string | -1 | 0)**: The number of seconds behind real time that data is queried. For example, if data from 10:04 a.m. might +not be searchable in Elasticsearch until 10:06 a.m., set this property to 120 seconds. The default +value is randomly selected between `60s` and `120s`. This randomness improves the query performance +when there are multiple jobs running on the same node. +- **`runtime_mappings` (Optional, Record)**: Specifies runtime fields for the datafeed search. +- **`script_fields` (Optional, Record)**: Specifies scripts that evaluate custom expressions and returns script fields to the datafeed. +The detector configuration objects in a job can contain functions that use these script fields. +- **`scroll_size` (Optional, number)**: The size parameter that is used in Elasticsearch searches when the datafeed does not use aggregations. +The maximum value is the value of `index.max_result_window`. +- **`allow_no_indices` (Optional, boolean)**: If `true`, wildcard indices expressions that resolve into no concrete indices are ignored. This includes the +`_all` string or when no indices are specified. +- **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines +whether wildcard expressions match hidden data streams. Supports a list of values. Valid values are: -### simulate [_simulate] +* `all`: Match any data stream or index, including hidden ones. +* `closed`: Match closed, non-hidden indices. Also matches any non-hidden data stream. Data streams cannot be closed. +* `hidden`: Match hidden data streams and hidden indices. Must be combined with `open`, `closed`, or both. +* `none`: Wildcard patterns are not accepted. +* `open`: Match open, non-hidden indices. Also matches any non-hidden data stream. +- **`ignore_throttled` (Optional, boolean)**: If `true`, concrete, expanded or aliased indices are ignored when frozen. 
+- **`ignore_unavailable` (Optional, boolean)**: If `true`, unavailable indices (missing or closed) are ignored. -Simulate a pipeline. Run an ingest pipeline against a set of provided documents. You can either specify an existing pipeline to use with the provided documents or supply a pipeline definition in the body of the request. +## client.ml.updateFilter [_ml.update_filter] +Update a filter. +Updates the description of a filter, adds items, or removes items from the list. -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ingest-simulate) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-ml-update-filter) ```ts -client.ingest.simulate({ docs }) +client.ml.updateFilter({ filter_id }) ``` +### Arguments [_arguments_ml.update_filter] -### Arguments [_arguments_240] +#### Request (object) [_request_ml.update_filter] +- **`filter_id` (string)**: A string that uniquely identifies a filter. +- **`add_items` (Optional, string[])**: The items to add to the filter. +- **`description` (Optional, string)**: A description for the filter. +- **`remove_items` (Optional, string[])**: The items to remove from the filter. -* **Request (object):** - - * **`docs` ({ _id, _index, _source }[])**: Sample documents to test in the pipeline. - * **`id` (Optional, string)**: Pipeline to test. If you don’t specify a `pipeline` in the request body, this parameter is required. - * **`pipeline` (Optional, { description, on_failure, processors, version, deprecated, _meta })**: Pipeline to test. If you don’t specify the `pipeline` request path parameter, this parameter is required. If you specify both this and the request path parameter, the API only uses the request path parameter. - * **`verbose` (Optional, boolean)**: If `true`, the response includes output data for each processor in the executed pipeline. +## client.ml.updateJob [_ml.update_job] +Update an anomaly detection job. +Updates certain properties of an anomaly detection job. +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-ml-update-job) +```ts +client.ml.updateJob({ job_id }) +``` -## license [_license] +### Arguments [_arguments_ml.update_job] + +#### Request (object) [_request_ml.update_job] +- **`job_id` (string)**: Identifier for the job. +- **`allow_lazy_open` (Optional, boolean)**: Advanced configuration option. Specifies whether this job can open when +there is insufficient machine learning node capacity for it to be +immediately assigned to a node. If `false` and a machine learning node +with capacity to run the job cannot immediately be found, the open +anomaly detection jobs API returns an error. However, this is also +subject to the cluster-wide `xpack.ml.max_lazy_ml_nodes` setting. If this +option is set to `true`, the open anomaly detection jobs API does not +return an error and the job waits in the opening state until sufficient +machine learning node capacity is available. +- **`analysis_limits` (Optional, { model_memory_limit })** +- **`background_persist_interval` (Optional, string | -1 | 0)**: Advanced configuration option. The time between each periodic persistence +of the model. +The default value is a randomized value between 3 to 4 hours, which +avoids all jobs persisting at exactly the same time. The smallest allowed +value is 1 hour. +For very large models (several GB), persistence could take 10-20 minutes, +so do not set the value too low. 
+If the job is open when you make the update, you must stop the datafeed, +close the job, then reopen the job and restart the datafeed for the +changes to take effect. +- **`custom_settings` (Optional, Record)**: Advanced configuration option. Contains custom meta data about the job. +For example, it can contain custom URL information as shown in Adding +custom URLs to machine learning results. +- **`categorization_filters` (Optional, string[])** +- **`description` (Optional, string)**: A description of the job. +- **`model_plot_config` (Optional, { annotations_enabled, enabled, terms })** +- **`model_prune_window` (Optional, string | -1 | 0)** +- **`daily_model_snapshot_retention_after_days` (Optional, number)**: Advanced configuration option, which affects the automatic removal of old +model snapshots for this job. It specifies a period of time (in days) +after which only the first snapshot per day is retained. This period is +relative to the timestamp of the most recent snapshot for this job. Valid +values range from 0 to `model_snapshot_retention_days`. For jobs created +before version 7.8.0, the default value matches +`model_snapshot_retention_days`. +- **`model_snapshot_retention_days` (Optional, number)**: Advanced configuration option, which affects the automatic removal of old +model snapshots for this job. It specifies the maximum period of time (in +days) that snapshots are retained. This period is relative to the +timestamp of the most recent snapshot for this job. +- **`renormalization_window_days` (Optional, number)**: Advanced configuration option. The period over which adjustments to the +score are applied, as new data is seen. +- **`results_retention_days` (Optional, number)**: Advanced configuration option. The period of time (in days) that results +are retained. Age is calculated relative to the timestamp of the latest +bucket result. If this property has a non-null value, once per day at +00:30 (server time), results that are the specified number of days older +than the latest bucket result are deleted from Elasticsearch. The default +value is null, which means all results are retained. +- **`groups` (Optional, string[])**: A list of job groups. A job can belong to no groups or many. +- **`detectors` (Optional, { detector_index, description, custom_rules }[])**: An array of detector update objects. +- **`per_partition_categorization` (Optional, { enabled, stop_on_warn })**: Settings related to how categorization interacts with partition fields. + +## client.ml.updateModelSnapshot [_ml.update_model_snapshot] +Update a snapshot. +Updates certain properties of a snapshot. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-ml-update-model-snapshot) +```ts +client.ml.updateModelSnapshot({ job_id, snapshot_id }) +``` -### delete [_delete_7] +### Arguments [_arguments_ml.update_model_snapshot] -Delete the license. When the license expires, your subscription level reverts to Basic. +#### Request (object) [_request_ml.update_model_snapshot] +- **`job_id` (string)**: Identifier for the anomaly detection job. +- **`snapshot_id` (string)**: Identifier for the model snapshot. +- **`description` (Optional, string)**: A description of the model snapshot. +- **`retain` (Optional, boolean)**: If `true`, this snapshot will not be deleted during automatic cleanup of +snapshots older than `model_snapshot_retention_days`. However, this +snapshot will be deleted when the job is deleted. 
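+For example, a minimal sketch of keeping a snapshot out of automatic cleanup; the job and snapshot identifiers below are placeholders rather than values taken from this reference:
+
+```ts
+// Placeholder identifiers; substitute your own job and snapshot IDs.
+const response = await client.ml.updateModelSnapshot({
+  job_id: 'my-anomaly-job',
+  snapshot_id: '1575402236000',
+  description: 'Snapshot retained for audit purposes',
+  retain: true
+})
+console.log(response)
+```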
-If the operator privileges feature is enabled, only operator users can use this API. +## client.ml.updateTrainedModelDeployment [_ml.update_trained_model_deployment] +Update a trained model deployment. -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-license-delete) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-ml-update-trained-model-deployment) ```ts -client.license.delete({ ... }) +client.ml.updateTrainedModelDeployment({ model_id }) ``` +### Arguments [_arguments_ml.update_trained_model_deployment] + +#### Request (object) [_request_ml.update_trained_model_deployment] +- **`model_id` (string)**: The unique identifier of the trained model. Currently, only PyTorch models are supported. +- **`number_of_allocations` (Optional, number)**: The number of model allocations on each node where the model is deployed. +All allocations on a node share the same copy of the model in memory but use +a separate set of threads to evaluate the model. +Increasing this value generally increases the throughput. +If this setting is greater than the number of hardware threads +it will automatically be changed to a value less than the number of hardware threads. +If adaptive_allocations is enabled, do not set this value, because it’s automatically set. +- **`adaptive_allocations` (Optional, { enabled, min_number_of_allocations, max_number_of_allocations })**: Adaptive allocations configuration. When enabled, the number of allocations +is set based on the current load. +If adaptive_allocations is enabled, do not set the number of allocations manually. + +## client.ml.upgradeJobSnapshot [_ml.upgrade_job_snapshot] +Upgrade a snapshot. +Upgrade an anomaly detection model snapshot to the latest major version. +Over time, older snapshot formats are deprecated and removed. Anomaly +detection jobs support only snapshots that are from the current or previous +major version. +This API provides a means to upgrade a snapshot to the current major version. +This aids in preparing the cluster for an upgrade to the next major version. +Only one snapshot per anomaly detection job can be upgraded at a time and the +upgraded snapshot cannot be the current snapshot of the anomaly detection +job. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-ml-upgrade-job-snapshot) -### Arguments [_arguments_241] +```ts +client.ml.upgradeJobSnapshot({ job_id, snapshot_id }) +``` -* **Request (object):** +### Arguments [_arguments_ml.upgrade_job_snapshot] - * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. - * **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. +#### Request (object) [_request_ml.upgrade_job_snapshot] +- **`job_id` (string)**: Identifier for the anomaly detection job. +- **`snapshot_id` (string)**: A numerical character string that uniquely identifies the model snapshot. +- **`wait_for_completion` (Optional, boolean)**: When true, the API won’t respond until the upgrade is complete. +Otherwise, it responds as soon as the upgrade task is assigned to a node. +- **`timeout` (Optional, string | -1 | 0)**: Controls the time to wait for the request to complete. +## client.nodes.clearRepositoriesMeteringArchive [_nodes.clear_repositories_metering_archive] +Clear the archived repositories metering. 
+Clear the archived repositories metering information in the cluster. +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-nodes-clear-repositories-metering-archive) -### get [_get_7] +```ts +client.nodes.clearRepositoriesMeteringArchive({ node_id, max_archive_version }) +``` -Get license information. Get information about your Elastic license including its type, its status, when it was issued, and when it expires. +### Arguments [_arguments_nodes.clear_repositories_metering_archive] -::::{note} -If the master node is generating a new cluster state, the get license API may return a `404 Not Found` response. If you receive an unexpected 404 response after cluster startup, wait a short period and retry the request. -:::: +#### Request (object) [_request_nodes.clear_repositories_metering_archive] +- **`node_id` (string | string[])**: List of node IDs or names used to limit returned information. +- **`max_archive_version` (number)**: Specifies the maximum `archive_version` to be cleared from the archive. +## client.nodes.getRepositoriesMeteringInfo [_nodes.get_repositories_metering_info] +Get cluster repositories metering. +Get repositories metering information for a cluster. +This API exposes monotonically non-decreasing counters and it is expected that clients would durably store the information needed to compute aggregations over a period of time. +Additionally, the information exposed by this API is volatile, meaning that it will not be present after node restarts. -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-license-get) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-nodes-get-repositories-metering-info) ```ts -client.license.get({ ... }) +client.nodes.getRepositoriesMeteringInfo({ node_id }) ``` +### Arguments [_arguments_nodes.get_repositories_metering_info] -### Arguments [_arguments_242] +#### Request (object) [_request_nodes.get_repositories_metering_info] +- **`node_id` (string | string[])**: List of node IDs or names used to limit returned information. -* **Request (object):** +## client.nodes.hotThreads [_nodes.hot_threads] +Get the hot threads for nodes. +Get a breakdown of the hot threads on each selected node in the cluster. +The output is plain text with a breakdown of the top hot threads for each node. - * **`accept_enterprise` (Optional, boolean)**: If `true`, this parameter returns enterprise for Enterprise license types. If `false`, this parameter returns platinum for both platinum and enterprise license types. This behavior is maintained for backwards compatibility. This parameter is deprecated and will always be set to true in 8.x. - * **`local` (Optional, boolean)**: Specifies whether to retrieve local information. The default value is `false`, which means the information is retrieved from the master node. +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-nodes-hot-threads) +```ts +client.nodes.hotThreads({ ... }) +``` +### Arguments [_arguments_nodes.hot_threads] -### get_basic_status [_get_basic_status] +#### Request (object) [_request_nodes.hot_threads] +- **`node_id` (Optional, string | string[])**: List of node IDs or names used to limit returned information. +- **`ignore_idle_threads` (Optional, boolean)**: If true, known idle threads (e.g. waiting in a socket select, or to get +a task from an empty queue) are filtered out. 
+- **`interval` (Optional, string | -1 | 0)**: The interval to do the second sampling of threads. +- **`snapshots` (Optional, number)**: Number of samples of thread stacktrace. +- **`threads` (Optional, number)**: Specifies the number of hot threads to provide information for. +- **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received +before the timeout expires, the request fails and returns an error. +- **`type` (Optional, Enum("cpu" | "wait" | "block" | "gpu" | "mem"))**: The type to sample. +- **`sort` (Optional, Enum("cpu" | "wait" | "block" | "gpu" | "mem"))**: The sort order for 'cpu' type (default: total) -Get the basic license status. +## client.nodes.info [_nodes.info] +Get node information. -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-license-get-basic-status) +By default, the API returns all attributes and core settings for cluster nodes. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-nodes-info) ```ts -client.license.getBasicStatus() +client.nodes.info({ ... }) ``` +### Arguments [_arguments_nodes.info] -### get_trial_status [_get_trial_status] +#### Request (object) [_request_nodes.info] +- **`node_id` (Optional, string | string[])**: List of node IDs or names used to limit returned information. +- **`metric` (Optional, string | string[])**: Limits the information returned to the specific metrics. Supports a list, such as http,ingest. +- **`flat_settings` (Optional, boolean)**: If true, returns settings in flat format. +- **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. -Get the trial status. +## client.nodes.reloadSecureSettings [_nodes.reload_secure_settings] +Reload the keystore on nodes in the cluster. -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-license-get-trial-status) +Secure settings are stored in an on-disk keystore. Certain of these settings are reloadable. +That is, you can change them on disk and reload them without restarting any nodes in the cluster. +When you have updated reloadable secure settings in your keystore, you can use this API to reload those settings on each node. -```ts -client.license.getTrialStatus() -``` +When the Elasticsearch keystore is password protected and not simply obfuscated, you must provide the password for the keystore when you reload the secure settings. +Reloading the settings for the whole cluster assumes that the keystores for all nodes are protected with the same password; this method is allowed only when inter-node communications are encrypted. +Alternatively, you can reload the secure settings on each node by locally accessing the API and passing the node-specific Elasticsearch keystore password. +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-nodes-reload-secure-settings) -### post [_post_2] +```ts +client.nodes.reloadSecureSettings({ ... }) +``` -Update the license. You can update your license at runtime without shutting down your nodes. License updates take effect immediately. If the license you are installing does not support all of the features that were available with your previous license, however, you are notified in the response. You must then re-submit the API request with the acknowledge parameter set to true. 
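+As an illustration, a minimal sketch of reloading secure settings on selected nodes; the node IDs and keystore password below are placeholders rather than values taken from this reference (the parameters are described under Arguments below):
+
+```ts
+// Placeholder node IDs and keystore password; adjust for your cluster.
+const response = await client.nodes.reloadSecureSettings({
+  node_id: 'nodeId1,nodeId2',
+  secure_settings_password: 'keystore-password'
+})
+console.log(response)
+```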
+### Arguments [_arguments_nodes.reload_secure_settings] -::::{note} -If Elasticsearch security features are enabled and you are installing a gold or higher license, you must enable TLS on the transport networking layer before you install the license. If the operator privileges feature is enabled, only operator users can use this API. -:::: +#### Request (object) [_request_nodes.reload_secure_settings] +- **`node_id` (Optional, string | string[])**: The names of particular nodes in the cluster to target. +- **`secure_settings_password` (Optional, string)**: The password for the Elasticsearch keystore. +- **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. +If no response is received before the timeout expires, the request fails and returns an error. +## client.nodes.stats [_nodes.stats] +Get node statistics. +Get statistics for nodes in a cluster. +By default, all stats are returned. You can limit the returned information by using metrics. -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-license-post) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-nodes-stats) ```ts -client.license.post({ ... }) +client.nodes.stats({ ... }) ``` +### Arguments [_arguments_nodes.stats] + +#### Request (object) [_request_nodes.stats] +- **`node_id` (Optional, string | string[])**: List of node IDs or names used to limit returned information. +- **`metric` (Optional, string | string[])**: Limit the information returned to the specified metrics +- **`index_metric` (Optional, string | string[])**: Limit the information returned for indices metric to the specific index metrics. It can be used only if indices (or all) metric is specified. +- **`completion_fields` (Optional, string | string[])**: List or wildcard expressions of fields to include in fielddata and suggest statistics. +- **`fielddata_fields` (Optional, string | string[])**: List or wildcard expressions of fields to include in fielddata statistics. +- **`fields` (Optional, string | string[])**: List or wildcard expressions of fields to include in the statistics. +- **`groups` (Optional, boolean)**: List of search groups to include in the search statistics. +- **`include_segment_file_sizes` (Optional, boolean)**: If true, the call reports the aggregated disk usage of each one of the Lucene index files (only applies if segment stats are requested). +- **`level` (Optional, Enum("cluster" | "indices" | "shards"))**: Indicates whether statistics are aggregated at the cluster, index, or shard level. +- **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. +- **`types` (Optional, string[])**: A list of document types for the indexing index metric. +- **`include_unloaded_segments` (Optional, boolean)**: If `true`, the response includes information from segments that are not loaded into memory. -### Arguments [_arguments_243] +## client.nodes.usage [_nodes.usage] +Get feature usage information. 
-* **Request (object):** +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-nodes-usage) - * **`license` (Optional, { expiry_date_in_millis, issue_date_in_millis, start_date_in_millis, issued_to, issuer, max_nodes, max_resource_units, signature, type, uid })** - * **`licenses` (Optional, { expiry_date_in_millis, issue_date_in_millis, start_date_in_millis, issued_to, issuer, max_nodes, max_resource_units, signature, type, uid }[])**: A sequence of one or more JSON documents containing the license information. - * **`acknowledge` (Optional, boolean)**: Specifies whether you acknowledge the license changes. - * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. - * **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. +```ts +client.nodes.usage({ ... }) +``` +### Arguments [_arguments_nodes.usage] +#### Request (object) [_request_nodes.usage] +- **`node_id` (Optional, string | string[])**: A list of node IDs or names to limit the returned information; use `_local` to return information from the node you're connecting to, leave empty to get information from all nodes +- **`metric` (Optional, string | string[])**: Limits the information returned to the specific metrics. +A list of the following options: `_all`, `rest_actions`. +- **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. +If no response is received before the timeout expires, the request fails and returns an error. -### post_start_basic [_post_start_basic] +## client.queryRules.deleteRule [_query_rules.delete_rule] +Delete a query rule. +Delete a query rule within a query ruleset. +This is a destructive action that is only recoverable by re-adding the same rule with the create or update query rule API. -Start a basic license. Start an indefinite basic license, which gives access to all the basic features. +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-query-rules-delete-rule) -::::{note} -In order to start a basic license, you must not currently have a basic license. -:::: +```ts +client.queryRules.deleteRule({ ruleset_id, rule_id }) +``` +### Arguments [_arguments_query_rules.delete_rule] -If the basic license does not support all of the features that are available with your current license, however, you are notified in the response. You must then re-submit the API request with the `acknowledge` parameter set to `true`. +#### Request (object) [_request_query_rules.delete_rule] +- **`ruleset_id` (string)**: The unique identifier of the query ruleset containing the rule to delete +- **`rule_id` (string)**: The unique identifier of the query rule within the specified ruleset to delete -To check the status of your basic license, use the get basic license API. +## client.queryRules.deleteRuleset [_query_rules.delete_ruleset] +Delete a query ruleset. +Remove a query ruleset and its associated data. +This is a destructive action that is not recoverable. -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-license-post-start-basic) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-query-rules-delete-ruleset) ```ts -client.license.postStartBasic({ ... 
}) +client.queryRules.deleteRuleset({ ruleset_id }) ``` +### Arguments [_arguments_query_rules.delete_ruleset] + +#### Request (object) [_request_query_rules.delete_ruleset] +- **`ruleset_id` (string)**: The unique identifier of the query ruleset to delete + +## client.queryRules.getRule [_query_rules.get_rule] +Get a query rule. +Get details about a query rule within a query ruleset. -### Arguments [_arguments_244] +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-query-rules-get-rule) -* **Request (object):** +```ts +client.queryRules.getRule({ ruleset_id, rule_id }) +``` - * **`acknowledge` (Optional, boolean)**: whether the user has acknowledged acknowledge messages (default: false) - * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. - * **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. +### Arguments [_arguments_query_rules.get_rule] +#### Request (object) [_request_query_rules.get_rule] +- **`ruleset_id` (string)**: The unique identifier of the query ruleset containing the rule to retrieve +- **`rule_id` (string)**: The unique identifier of the query rule within the specified ruleset to retrieve +## client.queryRules.getRuleset [_query_rules.get_ruleset] +Get a query ruleset. +Get details about a query ruleset. -### post_start_trial [_post_start_trial] +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-query-rules-get-ruleset) -Start a trial. Start a 30-day trial, which gives access to all subscription features. +```ts +client.queryRules.getRuleset({ ruleset_id }) +``` -::::{note} -You are allowed to start a trial only if your cluster has not already activated a trial for the current major product version. For example, if you have already activated a trial for v8.0, you cannot start a new trial until v9.0. You can, however, request an extended trial at [https://www.elastic.co/trialextension](https://www.elastic.co/trialextension). -:::: +### Arguments [_arguments_query_rules.get_ruleset] +#### Request (object) [_request_query_rules.get_ruleset] +- **`ruleset_id` (string)**: The unique identifier of the query ruleset -To check the status of your trial, use the get trial status API. +## client.queryRules.listRulesets [_query_rules.list_rulesets] +Get all query rulesets. +Get summarized information about the query rulesets. -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-license-post-start-trial) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-query-rules-list-rulesets) ```ts -client.license.postStartTrial({ ... }) +client.queryRules.listRulesets({ ... }) ``` +### Arguments [_arguments_query_rules.list_rulesets] -### Arguments [_arguments_245] +#### Request (object) [_request_query_rules.list_rulesets] +- **`from` (Optional, number)**: The offset from the first result to fetch. +- **`size` (Optional, number)**: The maximum number of results to retrieve. -* **Request (object):** +## client.queryRules.putRule [_query_rules.put_rule] +Create or update a query rule. +Create or update a query rule within a query ruleset. 
- * **`acknowledge` (Optional, boolean)**: whether the user has acknowledged acknowledge messages (default: false) - * **`type_query_string` (Optional, string)** - * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. +IMPORTANT: Due to limitations within pinned queries, you can only pin documents using ids or docs, but cannot use both in single rule. +It is advised to use one or the other in query rulesets, to avoid errors. +Additionally, pinned queries have a maximum limit of 100 pinned hits. +If multiple matching rules pin more than 100 documents, only the first 100 documents are pinned in the order they are specified in the ruleset. +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-query-rules-put-rule) +```ts +client.queryRules.putRule({ ruleset_id, rule_id, type, criteria, actions }) +``` -## logstash [_logstash] +### Arguments [_arguments_query_rules.put_rule] +#### Request (object) [_request_query_rules.put_rule] +- **`ruleset_id` (string)**: The unique identifier of the query ruleset containing the rule to be created or updated. +- **`rule_id` (string)**: The unique identifier of the query rule within the specified ruleset to be created or updated. +- **`type` (Enum("pinned" | "exclude"))**: The type of rule. +- **`criteria` ({ type, metadata, values } | { type, metadata, values }[])**: The criteria that must be met for the rule to be applied. +If multiple criteria are specified for a rule, all criteria must be met for the rule to be applied. +- **`actions` ({ ids, docs })**: The actions to take when the rule is matched. +The format of this action depends on the rule type. +- **`priority` (Optional, number)** -### delete_pipeline [_delete_pipeline_2] +## client.queryRules.putRuleset [_query_rules.put_ruleset] +Create or update a query ruleset. +There is a limit of 100 rules per ruleset. +This limit can be increased by using the `xpack.applications.rules.max_rules_per_ruleset` cluster setting. -Delete a Logstash pipeline. Delete a pipeline that is used for Logstash Central Management. If the request succeeds, you receive an empty response with an appropriate status code. +IMPORTANT: Due to limitations within pinned queries, you can only select documents using `ids` or `docs`, but cannot use both in single rule. +It is advised to use one or the other in query rulesets, to avoid errors. +Additionally, pinned queries have a maximum limit of 100 pinned hits. +If multiple matching rules pin more than 100 documents, only the first 100 documents are pinned in the order they are specified in the ruleset. -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-logstash-delete-pipeline) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-query-rules-put-ruleset) ```ts -client.logstash.deletePipeline({ id }) +client.queryRules.putRuleset({ ruleset_id, rules }) ``` +### Arguments [_arguments_query_rules.put_ruleset] + +#### Request (object) [_request_query_rules.put_ruleset] +- **`ruleset_id` (string)**: The unique identifier of the query ruleset to be created or updated. +- **`rules` ({ rule_id, type, criteria, actions, priority } | { rule_id, type, criteria, actions, priority }[])** + +## client.queryRules.test [_query_rules.test] +Test a query ruleset. +Evaluate match criteria against a query ruleset to identify the rules that would match that criteria. 
-### Arguments [_arguments_246] +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-query-rules-test) + +```ts +client.queryRules.test({ ruleset_id, match_criteria }) +``` -* **Request (object):** +### Arguments [_arguments_query_rules.test] - * **`id` (string)**: An identifier for the pipeline. +#### Request (object) [_request_query_rules.test] +- **`ruleset_id` (string)**: The unique identifier of the query ruleset to be created or updated +- **`match_criteria` (Record)**: The match criteria to apply to rules in the given query ruleset. +Match criteria should match the keys defined in the `criteria.metadata` field of the rule. +## client.rollup.deleteJob [_rollup.delete_job] +Delete a rollup job. +A job must be stopped before it can be deleted. +If you attempt to delete a started job, an error occurs. +Similarly, if you attempt to delete a nonexistent job, an exception occurs. -### get_pipeline [_get_pipeline_2] +IMPORTANT: When you delete a job, you remove only the process that is actively monitoring and rolling up data. +The API does not delete any previously rolled up data. +This is by design; a user may wish to roll up a static data set. +Because the data set is static, after it has been fully rolled up there is no need to keep the indexing rollup job around (as there will be no new data). +Thus the job can be deleted, leaving behind the rolled up data for analysis. +If you wish to also remove the rollup data and the rollup index contains the data for only a single job, you can delete the whole rollup index. +If the rollup index stores data from several jobs, you must issue a delete-by-query that targets the rollup job's identifier in the rollup index. For example: -Get Logstash pipelines. Get pipelines that are used for Logstash Central Management. +``` +POST my_rollup_index/_delete_by_query +{ + "query": { + "term": { + "_rollup.id": "the_rollup_job_id" + } + } +} +``` -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-logstash-get-pipeline) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-rollup-delete-job) ```ts -client.logstash.getPipeline({ ... }) +client.rollup.deleteJob({ id }) ``` +### Arguments [_arguments_rollup.delete_job] + +#### Request (object) [_request_rollup.delete_job] +- **`id` (string)**: Identifier for the job. -### Arguments [_arguments_247] +## client.rollup.getJobs [_rollup.get_jobs] +Get rollup job information. +Get the configuration, stats, and status of rollup jobs. -* **Request (object):** +NOTE: This API returns only active (both `STARTED` and `STOPPED`) jobs. +If a job was created, ran for a while, then was deleted, the API does not return any details about it. +For details about a historical rollup job, the rollup capabilities API may be more useful. - * **`id` (Optional, string | string[])**: A list of pipeline identifiers. +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-rollup-get-jobs) + +```ts +client.rollup.getJobs({ ... }) +``` +### Arguments [_arguments_rollup.get_jobs] +#### Request (object) [_request_rollup.get_jobs] +- **`id` (Optional, string)**: Identifier for the rollup job. +If it is `_all` or omitted, the API returns all rollup jobs. -### put_pipeline [_put_pipeline_2] +## client.rollup.getRollupCaps [_rollup.get_rollup_caps] +Get the rollup job capabilities. 
+Get the capabilities of any rollup jobs that have been configured for a specific index or index pattern. -Create or update a Logstash pipeline. +This API is useful because a rollup job is often configured to rollup only a subset of fields from the source index. +Furthermore, only certain aggregations can be configured for various fields, leading to a limited subset of functionality depending on that configuration. +This API enables you to inspect an index and determine: -Create a pipeline that is used for Logstash Central Management. If the specified pipeline exists, it is replaced. +1. Does this index have associated rollup data somewhere in the cluster? +2. If yes to the first question, what fields were rolled up, what aggregations can be performed, and where does the data live? -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-logstash-put-pipeline) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-rollup-get-rollup-caps) ```ts -client.logstash.putPipeline({ id }) +client.rollup.getRollupCaps({ ... }) ``` +### Arguments [_arguments_rollup.get_rollup_caps] + +#### Request (object) [_request_rollup.get_rollup_caps] +- **`id` (Optional, string)**: Index, indices or index-pattern to return rollup capabilities for. +`_all` may be used to fetch rollup capabilities from all jobs. -### Arguments [_arguments_248] +## client.rollup.getRollupIndexCaps [_rollup.get_rollup_index_caps] +Get the rollup index capabilities. +Get the rollup capabilities of all jobs inside of a rollup index. +A single rollup index may store the data for multiple rollup jobs and may have a variety of capabilities depending on those jobs. This API enables you to determine: -* **Request (object):** +* What jobs are stored in an index (or indices specified via a pattern)? +* What target indices were rolled up, what fields were used in those rollups, and what aggregations can be performed on each job? - * **`id` (string)**: An identifier for the pipeline. - * **`pipeline` (Optional, { description, on_failure, processors, version, deprecated, _meta })** +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-rollup-get-rollup-index-caps) +```ts +client.rollup.getRollupIndexCaps({ index }) +``` +### Arguments [_arguments_rollup.get_rollup_index_caps] -## migration [_migration] +#### Request (object) [_request_rollup.get_rollup_index_caps] +- **`index` (string | string[])**: Data stream or index to check for rollup capabilities. +Wildcard (`*`) expressions are supported. +## client.rollup.putJob [_rollup.put_job] +Create a rollup job. -### deprecations [_deprecations] +WARNING: From 8.15.0, calling this API in a cluster with no rollup usage will fail with a message about the deprecation and planned removal of rollup features. A cluster needs to contain either a rollup job or a rollup index in order for this API to be allowed to run. -Get deprecation information. Get information about different cluster, node, and index level settings that use deprecated features that will be removed or changed in the next major version. +The rollup job configuration contains all the details about how the job should run, when it indexes documents, and what future queries will be able to run against the rollup index. -::::{tip} -This APIs is designed for indirect use by the Upgrade Assistant. You are strongly recommended to use the Upgrade Assistant. 
-::::
+There are three main sections to the job configuration: the logistical details about the job (for example, the cron schedule), the fields that are used for grouping, and what metrics to collect for each group.
+Jobs are created in a `STOPPED` state. You can start them with the start rollup jobs API.
-[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-migration-deprecations)
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-rollup-put-job)
```ts
-client.migration.deprecations({ ... })
+client.rollup.putJob({ id, cron, groups, index_pattern, page_size, rollup_index })
```
+### Arguments [_arguments_rollup.put_job]
+
+#### Request (object) [_request_rollup.put_job]
+- **`id` (string)**: Identifier for the rollup job. This can be any alphanumeric string and uniquely identifies the
+data that is associated with the rollup job. The ID is persistent; it is stored with the rolled
+up data. If you create a job, let it run for a while, then delete the job, the data that the job
+rolled up is still associated with this job ID. You cannot create a new job with the same ID
+since that could lead to problems with mismatched job configurations.
+- **`cron` (string)**: A cron string which defines the intervals when the rollup job should be executed. When the interval
+triggers, the indexer attempts to roll up the data in the index pattern. The cron pattern is unrelated
+to the time interval of the data being rolled up. For example, you may wish to create hourly rollups
+of your documents but only run the indexer on a daily basis at midnight, as defined by the cron. The
+cron pattern is defined just like a Watcher cron schedule.
+- **`groups` ({ date_histogram, histogram, terms })**: Defines the grouping fields and aggregations that are defined for this rollup job. These fields will then be
+available later for aggregating into buckets. These aggs and fields can be used in any combination. Think of
+the groups configuration as defining a set of tools that can later be used in aggregations to partition the
+data. Unlike raw data, we have to think ahead to which fields and aggregations might be used. Rollups provide
+enough flexibility that you simply need to determine which fields are needed, not in what order they are needed.
+- **`index_pattern` (string)**: The index or index pattern to roll up. Supports wildcard-style patterns (`logstash-*`). The job attempts to
+roll up the entire index or index pattern.
+- **`page_size` (number)**: The number of bucket results that are processed on each iteration of the rollup indexer. A larger value tends
+to execute faster, but requires more memory during processing. This value has no effect on how the data is
+rolled up; it is merely used for tweaking the speed or memory cost of the indexer.
+- **`rollup_index` (string)**: The index that contains the rollup results. The index can be shared with other rollup jobs. The data is stored so that it doesn’t interfere with unrelated jobs.
+- **`metrics` (Optional, { field, metrics }[])**: Defines the metrics to collect for each grouping tuple. By default, only the doc_counts are collected for each
+group. To make rollup useful, you will often add metrics like averages, mins, maxes, etc. Metrics are defined
+on a per-field basis and for each field you configure which metric should be collected.
+- **`timeout` (Optional, string | -1 | 0)**: Time to wait for the request to complete.
+- **`headers` (Optional, Record)** + +## client.rollup.rollupSearch [_rollup.rollup_search] +Search rolled-up data. +The rollup search endpoint is needed because, internally, rolled-up documents utilize a different document structure than the original data. +It rewrites standard Query DSL into a format that matches the rollup documents then takes the response and rewrites it back to what a client would expect given the original query. + +The request body supports a subset of features from the regular search API. +The following functionality is not available: + +`size`: Because rollups work on pre-aggregated data, no search hits can be returned and so size must be set to zero or omitted entirely. +`highlighter`, `suggestors`, `post_filter`, `profile`, `explain`: These are similarly disallowed. -### Arguments [_arguments_249] - -* **Request (object):** - - * **`index` (Optional, string)**: Comma-separate list of data streams or indices to check. Wildcard (*) expressions are supported. - +**Searching both historical rollup and non-rollup data** +The rollup search API has the capability to search across both "live" non-rollup data and the aggregated rollup data. +This is done by simply adding the live indices to the URI. For example: -### get_feature_upgrade_status [_get_feature_upgrade_status] +``` +GET sensor-1,sensor_rollup/_rollup_search +{ + "size": 0, + "aggregations": { + "max_temperature": { + "max": { + "field": "temperature" + } + } + } +} +``` -Get feature migration information. Version upgrades sometimes require changes to how features store configuration information and data in system indices. Check which features need to be migrated and the status of any migrations that are in progress. +The rollup search endpoint does two things when the search runs: -::::{tip} -This API is designed for indirect use by the Upgrade Assistant. You are strongly recommended to use the Upgrade Assistant. -:::: +* The original request is sent to the non-rollup index unaltered. +* A rewritten version of the original request is sent to the rollup index. +When the two responses are received, the endpoint rewrites the rollup response and merges the two together. +During the merging process, if there is any overlap in buckets between the two responses, the buckets from the non-rollup index are used. -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-migration-get-feature-upgrade-status) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-rollup-rollup-search) ```ts -client.migration.getFeatureUpgradeStatus() +client.rollup.rollupSearch({ index }) ``` +### Arguments [_arguments_rollup.rollup_search] -### post_feature_upgrade [_post_feature_upgrade] - -Start the feature migration. Version upgrades sometimes require changes to how features store configuration information and data in system indices. This API starts the automatic migration process. - -Some functionality might be temporarily unavailable during the migration process. +#### Request (object) [_request_rollup.rollup_search] +- **`index` (string | string[])**: A list of data streams and indices used to limit the request. +This parameter has the following rules: -::::{tip} -The API is designed for indirect use by the Upgrade Assistant. We strongly recommend you use the Upgrade Assistant. -:::: +* At least one data stream, index, or wildcard expression must be specified. This target can include a rollup or non-rollup index. 
For data streams, the stream's backing indices can only serve as non-rollup indices. Omitting the parameter or using `_all` are not permitted. +* Multiple non-rollup indices may be specified. +* Only one rollup index may be specified. If more than one are supplied, an exception occurs. +* Wildcard expressions (`*`) may be used. If they match more than one rollup index, an exception occurs. However, you can use an expression to match multiple non-rollup indices or data streams. +- **`aggregations` (Optional, Record)**: Specifies aggregations. +- **`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })**: Specifies a DSL query that is subject to some limitations. +- **`size` (Optional, number)**: Must be zero if set, as rollups work on pre-aggregated data. +- **`rest_total_hits_as_int` (Optional, boolean)**: Indicates whether hits.total should be rendered as an integer or an object in the rest search response +- **`typed_keys` (Optional, boolean)**: Specify whether aggregation and suggester names should be prefixed by their respective types in the response +## client.rollup.startJob [_rollup.start_job] +Start rollup jobs. +If you try to start a job that does not exist, an exception occurs. +If you try to start a job that is already started, nothing happens. -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-migration-get-feature-upgrade-status) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-rollup-start-job) ```ts -client.migration.postFeatureUpgrade() +client.rollup.startJob({ id }) ``` +### Arguments [_arguments_rollup.start_job] -## ml [_ml] +#### Request (object) [_request_rollup.start_job] +- **`id` (string)**: Identifier for the rollup job. +## client.rollup.stopJob [_rollup.stop_job] +Stop rollup jobs. +If you try to stop a job that does not exist, an exception occurs. +If you try to stop a job that is already stopped, nothing happens. -### clear_trained_model_deployment_cache [_clear_trained_model_deployment_cache] +Since only a stopped job can be deleted, it can be useful to block the API until the indexer has fully stopped. +This is accomplished with the `wait_for_completion` query parameter, and optionally a timeout. For example: -Clear trained model deployment cache. Cache will be cleared on all nodes where the trained model is assigned. A trained model deployment may have an inference cache enabled. As requests are handled by each allocated node, their responses may be cached on that individual node. Calling this API clears the caches without restarting the deployment. +``` +POST _rollup/job/sensor/_stop?wait_for_completion=true&timeout=10s +``` +The parameter blocks the API call from returning until either the job has moved to STOPPED or the specified time has elapsed. 
+If the specified time elapses without the job moving to STOPPED, a timeout exception occurs. -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-clear-trained-model-deployment-cache) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-rollup-stop-job) ```ts -client.ml.clearTrainedModelDeploymentCache({ model_id }) +client.rollup.stopJob({ id }) ``` +### Arguments [_arguments_rollup.stop_job] -### Arguments [_arguments_250] +#### Request (object) [_request_rollup.stop_job] +- **`id` (string)**: Identifier for the rollup job. +- **`timeout` (Optional, string | -1 | 0)**: If `wait_for_completion` is `true`, the API blocks for (at maximum) the specified duration while waiting for the job to stop. +If more than `timeout` time has passed, the API throws a timeout exception. +NOTE: Even if a timeout occurs, the stop request is still processing and eventually moves the job to STOPPED. +The timeout simply means the API call itself timed out while waiting for the status change. +- **`wait_for_completion` (Optional, boolean)**: If set to `true`, causes the API to block until the indexer state completely stops. +If set to `false`, the API returns immediately and the indexer is stopped asynchronously in the background. -* **Request (object):** +## client.searchApplication.delete [_search_application.delete] +Delete a search application. - * **`model_id` (string)**: The unique identifier of the trained model. +Remove a search application and its associated alias. Indices attached to the search application are not removed. +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-search-application-delete) + +```ts +client.searchApplication.delete({ name }) +``` +### Arguments [_arguments_search_application.delete] -### close_job [_close_job] +#### Request (object) [_request_search_application.delete] +- **`name` (string)**: The name of the search application to delete. -Close anomaly detection jobs. A job can be opened and closed multiple times throughout its lifecycle. A closed job cannot receive data or perform analysis operations, but you can still explore and navigate results. When you close a job, it runs housekeeping tasks such as pruning the model history, flushing buffers, calculating final results and persisting the model snapshots. Depending upon the size of the job, it could take several minutes to close and the equivalent time to re-open. After it is closed, the job has a minimal overhead on the cluster except for maintaining its meta data. Therefore it is a best practice to close jobs that are no longer required to process data. If you close an anomaly detection job whose datafeed is running, the request first tries to stop the datafeed. This behavior is equivalent to calling stop datafeed API with the same timeout and force parameters as the close job request. When a datafeed that has a specified end date stops, it automatically closes its associated job. +## client.searchApplication.deleteBehavioralAnalytics [_search_application.delete_behavioral_analytics] +Delete a behavioral analytics collection. +The associated data stream is also deleted. 
-[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-close-job) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-search-application-delete-behavioral-analytics) ```ts -client.ml.closeJob({ job_id }) +client.searchApplication.deleteBehavioralAnalytics({ name }) ``` +### Arguments [_arguments_search_application.delete_behavioral_analytics] -### Arguments [_arguments_251] +#### Request (object) [_request_search_application.delete_behavioral_analytics] +- **`name` (string)**: The name of the analytics collection to be deleted -* **Request (object):** +## client.searchApplication.get [_search_application.get] +Get search application details. - * **`job_id` (string)**: Identifier for the anomaly detection job. It can be a job identifier, a group name, or a wildcard expression. You can close multiple anomaly detection jobs in a single API request by using a group name, a list of jobs, or a wildcard expression. You can close all jobs by using `_all` or by specifying `*` as the job identifier. - * **`allow_no_match` (Optional, boolean)**: Refer to the description for the `allow_no_match` query parameter. - * **`force` (Optional, boolean)**: Refer to the descriptiion for the `force` query parameter. - * **`timeout` (Optional, string | -1 | 0)**: Refer to the description for the `timeout` query parameter. +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-search-application-get) +```ts +client.searchApplication.get({ name }) +``` +### Arguments [_arguments_search_application.get] -### delete_calendar [_delete_calendar] +#### Request (object) [_request_search_application.get] +- **`name` (string)**: The name of the search application -Delete a calendar. Removes all scheduled events from a calendar, then deletes it. +## client.searchApplication.getBehavioralAnalytics [_search_application.get_behavioral_analytics] +Get behavioral analytics collections. -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-delete-calendar) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-search-application-get-behavioral-analytics) ```ts -client.ml.deleteCalendar({ calendar_id }) +client.searchApplication.getBehavioralAnalytics({ ... }) ``` +### Arguments [_arguments_search_application.get_behavioral_analytics] -### Arguments [_arguments_252] +#### Request (object) [_request_search_application.get_behavioral_analytics] +- **`name` (Optional, string[])**: A list of analytics collections to limit the returned information -* **Request (object):** +## client.searchApplication.list [_search_application.list] +Get search applications. +Get information about search applications. - * **`calendar_id` (string)**: A string that uniquely identifies a calendar. +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-search-application-get-behavioral-analytics) +```ts +client.searchApplication.list({ ... }) +``` +### Arguments [_arguments_search_application.list] -### delete_calendar_event [_delete_calendar_event] +#### Request (object) [_request_search_application.list] +- **`q` (Optional, string)**: Query in the Lucene query string syntax. +- **`from` (Optional, number)**: Starting offset. +- **`size` (Optional, number)**: Specifies a max number of results to get. -Delete events from a calendar. 
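+
+For example, here is a minimal sketch of paging through search applications, assuming `client` is an already-configured Elasticsearch `Client` instance; the query string and pagination values are illustrative:
+
+```ts
+// List up to 10 search applications whose names start with "website-"
+const apps = await client.searchApplication.list({
+  q: 'website-*',
+  from: 0,
+  size: 10
+})
+console.log(apps)
+```
+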
+## client.searchApplication.postBehavioralAnalyticsEvent [_search_application.post_behavioral_analytics_event] +Create a behavioral analytics collection event. -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-delete-calendar-event) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-search-application-post-behavioral-analytics-event) ```ts -client.ml.deleteCalendarEvent({ calendar_id, event_id }) +client.searchApplication.postBehavioralAnalyticsEvent({ collection_name, event_type }) ``` +### Arguments [_arguments_search_application.post_behavioral_analytics_event] -### Arguments [_arguments_253] +#### Request (object) [_request_search_application.post_behavioral_analytics_event] +- **`collection_name` (string)**: The name of the behavioral analytics collection. +- **`event_type` (Enum("page_view" | "search" | "search_click"))**: The analytics event type. +- **`payload` (Optional, User-defined value)** +- **`debug` (Optional, boolean)**: Whether the response type has to include more details -* **Request (object):** +## client.searchApplication.put [_search_application.put] +Create or update a search application. - * **`calendar_id` (string)**: A string that uniquely identifies a calendar. - * **`event_id` (string)**: Identifier for the scheduled event. You can obtain this identifier by using the get calendar events API. +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-search-application-put) +```ts +client.searchApplication.put({ name }) +``` +### Arguments [_arguments_search_application.put] -### delete_calendar_job [_delete_calendar_job] +#### Request (object) [_request_search_application.put] +- **`name` (string)**: The name of the search application to be created or updated. +- **`search_application` (Optional, { indices, analytics_collection_name, template })** +- **`create` (Optional, boolean)**: If `true`, this request cannot replace or update existing Search Applications. -Delete anomaly jobs from a calendar. +## client.searchApplication.putBehavioralAnalytics [_search_application.put_behavioral_analytics] +Create a behavioral analytics collection. -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-delete-calendar-job) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-search-application-put-behavioral-analytics) ```ts -client.ml.deleteCalendarJob({ calendar_id, job_id }) +client.searchApplication.putBehavioralAnalytics({ name }) ``` +### Arguments [_arguments_search_application.put_behavioral_analytics] -### Arguments [_arguments_254] +#### Request (object) [_request_search_application.put_behavioral_analytics] +- **`name` (string)**: The name of the analytics collection to be created or updated. -* **Request (object):** +## client.searchApplication.renderQuery [_search_application.render_query] +Render a search application query. +Generate an Elasticsearch query using the specified query parameters and the search template associated with the search application or a default template if none is specified. +If a parameter used in the search template is not specified in `params`, the parameter's default value will be used. +The API returns the specific Elasticsearch query that would be generated and run by calling the search application search API. + +You must have `read` privileges on the backing alias of the search application. 
- * **`calendar_id` (string)**: A string that uniquely identifies a calendar. - * **`job_id` (string | string[])**: An identifier for the anomaly detection jobs. It can be a job identifier, a group name, or a list of jobs or groups. +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-search-application-render-query) +```ts +client.searchApplication.renderQuery({ name }) +``` +### Arguments [_arguments_search_application.render_query] -### delete_data_frame_analytics [_delete_data_frame_analytics] +#### Request (object) [_request_search_application.render_query] +- **`name` (string)**: The name of the search application to render the query for. +- **`params` (Optional, Record)** -Delete a data frame analytics job. +## client.searchApplication.search [_search_application.search] +Run a search application search. +Generate and run an Elasticsearch query that uses the specified query parameter and the search template associated with the search application or default template. +Unspecified template parameters are assigned their default values if applicable. -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-delete-data-frame-analytics) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-search-application-search) ```ts -client.ml.deleteDataFrameAnalytics({ id }) +client.searchApplication.search({ name }) ``` +### Arguments [_arguments_search_application.search] -### Arguments [_arguments_255] +#### Request (object) [_request_search_application.search] +- **`name` (string)**: The name of the search application to be searched. +- **`params` (Optional, Record)**: Query parameters specific to this request, which will override any defaults specified in the template. +- **`typed_keys` (Optional, boolean)**: Determines whether aggregation names are prefixed by their respective types in the response. -* **Request (object):** +## client.searchableSnapshots.cacheStats [_searchable_snapshots.cache_stats] +Get cache statistics. +Get statistics about the shared cache for partially mounted indices. - * **`id` (string)**: Identifier for the data frame analytics job. - * **`force` (Optional, boolean)**: If `true`, it deletes a job that is not stopped; this method is quicker than stopping and deleting the job. - * **`timeout` (Optional, string | -1 | 0)**: The time to wait for the job to be deleted. +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-searchable-snapshots-cache-stats) +```ts +client.searchableSnapshots.cacheStats({ ... }) +``` +### Arguments [_arguments_searchable_snapshots.cache_stats] -### delete_datafeed [_delete_datafeed] +#### Request (object) [_request_searchable_snapshots.cache_stats] +- **`node_id` (Optional, string | string[])**: The names of the nodes in the cluster to target. +- **`master_timeout` (Optional, string | -1 | 0)** -Delete a datafeed. +## client.searchableSnapshots.clearCache [_searchable_snapshots.clear_cache] +Clear the cache. +Clear indices and data streams from the shared cache for partially mounted indices. -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-delete-datafeed) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-searchable-snapshots-clear-cache) ```ts -client.ml.deleteDatafeed({ datafeed_id }) +client.searchableSnapshots.clearCache({ ...
}) ``` +### Arguments [_arguments_searchable_snapshots.clear_cache] -### Arguments [_arguments_256] +#### Request (object) [_request_searchable_snapshots.clear_cache] +- **`index` (Optional, string | string[])**: A list of data streams, indices, and aliases to clear from the cache. +It supports wildcards (`*`). +- **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Whether to expand wildcard expression to concrete indices that are open, closed or both. +- **`allow_no_indices` (Optional, boolean)**: Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified) +- **`ignore_unavailable` (Optional, boolean)**: Whether specified concrete indices should be ignored when unavailable (missing or closed) -* **Request (object):** +## client.searchableSnapshots.mount [_searchable_snapshots.mount] +Mount a snapshot. +Mount a snapshot as a searchable snapshot index. +Do not use this API for snapshots managed by index lifecycle management (ILM). +Manually mounting ILM-managed snapshots can interfere with ILM processes. - * **`datafeed_id` (string)**: A numerical character string that uniquely identifies the datafeed. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. It must start and end with alphanumeric characters. - * **`force` (Optional, boolean)**: Use to forcefully delete a started datafeed; this method is quicker than stopping and deleting the datafeed. +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-searchable-snapshots-mount) +```ts +client.searchableSnapshots.mount({ repository, snapshot, index }) +``` +### Arguments [_arguments_searchable_snapshots.mount] -### delete_expired_data [_delete_expired_data] +#### Request (object) [_request_searchable_snapshots.mount] +- **`repository` (string)**: The name of the repository containing the snapshot of the index to mount. +- **`snapshot` (string)**: The name of the snapshot of the index to mount. +- **`index` (string)**: The name of the index contained in the snapshot whose data is to be mounted. +If no `renamed_index` is specified, this name will also be used to create the new index. +- **`renamed_index` (Optional, string)**: The name of the index that will be created. +- **`index_settings` (Optional, Record)**: The settings that should be added to the index when it is mounted. +- **`ignore_index_settings` (Optional, string[])**: The names of settings that should be removed from the index when it is mounted. +- **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for the master node. +If the master node is not available before the timeout expires, the request fails and returns an error. +To indicate that the request should never timeout, set it to `-1`. +- **`wait_for_completion` (Optional, boolean)**: If true, the request blocks until the operation is complete. +- **`storage` (Optional, string)**: The mount option for the searchable snapshot index. -Delete expired ML data. Deletes all job results, model snapshots and forecast data that have exceeded their retention days period. Machine learning state documents that are not associated with any job are also deleted. You can limit the request to a single or set of anomaly detection jobs by using a job identifier, a group name, a list of jobs, or a wildcard expression. 
You can delete expired data for all anomaly detection jobs by using _all, by specifying * as the , or by omitting the . +## client.searchableSnapshots.stats [_searchable_snapshots.stats] +Get searchable snapshot statistics. -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-delete-expired-data) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-searchable-snapshots-stats) ```ts -client.ml.deleteExpiredData({ ... }) +client.searchableSnapshots.stats({ ... }) ``` +### Arguments [_arguments_searchable_snapshots.stats] -### Arguments [_arguments_257] +#### Request (object) [_request_searchable_snapshots.stats] +- **`index` (Optional, string | string[])**: A list of data streams and indices to retrieve statistics for. +- **`level` (Optional, Enum("cluster" | "indices" | "shards"))**: Return stats aggregated at cluster, index or shard level -* **Request (object):** - - * **`job_id` (Optional, string)**: Identifier for an anomaly detection job. It can be a job identifier, a group name, or a wildcard expression. - * **`requests_per_second` (Optional, float)**: The desired requests per second for the deletion processes. The default behavior is no throttling. - * **`timeout` (Optional, string | -1 | 0)**: How long can the underlying delete processes run until they are canceled. +## client.security.activateUserProfile [_security.activate_user_profile] +Activate a user profile. +Create or update a user profile on behalf of another user. +NOTE: The user profile feature is designed only for use by Kibana and Elastic's Observability, Enterprise Search, and Elastic Security solutions. +Individual users and external applications should not call this API directly. +The calling application must have either an `access_token` or a combination of `username` and `password` for the user that the profile document is intended for. +Elastic reserves the right to change or remove this feature in future releases without prior notice. -### delete_filter [_delete_filter] +This API creates or updates a profile document for end users with information that is extracted from the user's authentication object including `username`, `full_name,` `roles`, and the authentication realm. +For example, in the JWT `access_token` case, the profile user's `username` is extracted from the JWT token claim pointed to by the `claims.principal` setting of the JWT realm that authenticated the token. -Delete a filter. If an anomaly detection job references the filter, you cannot delete the filter. You must update or delete the job before you can delete the filter. +When updating a profile document, the API enables the document if it was disabled. +Any updates do not change existing content for either the `labels` or `data` fields. -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-delete-filter) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-security-activate-user-profile) ```ts -client.ml.deleteFilter({ filter_id }) +client.security.activateUserProfile({ grant_type }) ``` +### Arguments [_arguments_security.activate_user_profile] -### Arguments [_arguments_258] - -* **Request (object):** +#### Request (object) [_request_security.activate_user_profile] +- **`grant_type` (Enum("password" | "access_token"))**: The type of grant. +- **`access_token` (Optional, string)**: The user's Elasticsearch access token or JWT. 
+Both `access` and `id` JWT token types are supported and they depend on the underlying JWT realm configuration. +If you specify the `access_token` grant type, this parameter is required. +It is not valid with other grant types. +- **`password` (Optional, string)**: The user's password. +If you specify the `password` grant type, this parameter is required. +It is not valid with other grant types. +- **`username` (Optional, string)**: The username that identifies the user. +If you specify the `password` grant type, this parameter is required. +It is not valid with other grant types. - * **`filter_id` (string)**: A string that uniquely identifies a filter. - - - -### delete_forecast [_delete_forecast] +## client.security.authenticate [_security.authenticate] +Authenticate a user. -Delete forecasts from a job. By default, forecasts are retained for 14 days. You can specify a different retention period with the `expires_in` parameter in the forecast jobs API. The delete forecast API enables you to delete one or more forecasts before they expire. +Authenticates a user and returns information about the authenticated user. +Include the user information in a [basic auth header](https://en.wikipedia.org/wiki/Basic_access_authentication). +A successful call returns a JSON structure that shows user information such as their username, the roles that are assigned to the user, any assigned metadata, and information about the realms that authenticated and authorized the user. +If the user cannot be authenticated, this API returns a 401 status code. -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-delete-forecast) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-security-authenticate) ```ts -client.ml.deleteForecast({ job_id }) +client.security.authenticate() ``` -### Arguments [_arguments_259] +## client.security.bulkDeleteRole [_security.bulk_delete_role] +Bulk delete roles. + +The role management APIs are generally the preferred way to manage roles, rather than using file-based role management. +The bulk delete roles API cannot delete roles that are defined in roles files. -* **Request (object):** +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-security-bulk-delete-role) - * **`job_id` (string)**: Identifier for the anomaly detection job. - * **`forecast_id` (Optional, string)**: A list of forecast identifiers. If you do not specify this optional parameter or if you specify `_all` or `*` the API deletes all forecasts from the job. - * **`allow_no_forecasts` (Optional, boolean)**: Specifies whether an error occurs when there are no forecasts. In particular, if this parameter is set to `false` and there are no forecasts associated with the job, attempts to delete all forecasts return an error. - * **`timeout` (Optional, string | -1 | 0)**: Specifies the period of time to wait for the completion of the delete operation. When this period of time elapses, the API fails and returns an error. 
+```ts +client.security.bulkDeleteRole({ names }) +``` +### Arguments [_arguments_security.bulk_delete_role] +#### Request (object) [_request_security.bulk_delete_role] +- **`names` (string[])**: An array of role names to delete +- **`refresh` (Optional, Enum(true | false | "wait_for"))**: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. -### delete_job [_delete_job] +## client.security.bulkPutRole [_security.bulk_put_role] +Bulk create or update roles. -Delete an anomaly detection job. All job configuration, model state and results are deleted. It is not currently possible to delete multiple jobs using wildcards or a comma separated list. If you delete a job that has a datafeed, the request first tries to delete the datafeed. This behavior is equivalent to calling the delete datafeed API with the same timeout and force parameters as the delete job request. +The role management APIs are generally the preferred way to manage roles, rather than using file-based role management. +The bulk create or update roles API cannot update roles that are defined in roles files. -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-delete-job) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-security-bulk-put-role) ```ts -client.ml.deleteJob({ job_id }) +client.security.bulkPutRole({ roles }) ``` +### Arguments [_arguments_security.bulk_put_role] -### Arguments [_arguments_260] +#### Request (object) [_request_security.bulk_put_role] +- **`roles` (Record)**: A dictionary of role name to RoleDescriptor objects to add or update +- **`refresh` (Optional, Enum(true | false | "wait_for"))**: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. -* **Request (object):** +## client.security.bulkUpdateApiKeys [_security.bulk_update_api_keys] +Bulk update API keys. +Update the attributes for multiple API keys. - * **`job_id` (string)**: Identifier for the anomaly detection job. - * **`force` (Optional, boolean)**: Use to forcefully delete an opened job; this method is quicker than closing and deleting the job. - * **`delete_user_annotations` (Optional, boolean)**: Specifies whether annotations that have been added by the user should be deleted along with any auto-generated annotations when the job is reset. - * **`wait_for_completion` (Optional, boolean)**: Specifies whether the request should return immediately or wait until the job deletion completes. +IMPORTANT: It is not possible to use an API key as the authentication credential for this API. To update API keys, the owner user's credentials are required. + +This API is similar to the update API key API but enables you to apply the same update to multiple API keys in one API call. This operation can greatly improve performance over making individual updates. +It is not possible to update expired or invalidated API keys. +This API supports updates to API key access scope, metadata and expiration. +The access scope of each API key is derived from the `role_descriptors` you specify in the request and a snapshot of the owner user's permissions at the time of the request. 
+The snapshot of the owner's permissions is updated automatically on every call. -### delete_model_snapshot [_delete_model_snapshot] +IMPORTANT: If you don't specify `role_descriptors` in the request, a call to this API might still change an API key's access scope. This change can occur if the owner user's permissions have changed since the API key was created or last modified. -Delete a model snapshot. You cannot delete the active model snapshot. To delete that snapshot, first revert to a different one. To identify the active model snapshot, refer to the `model_snapshot_id` in the results from the get jobs API. +A successful request returns a JSON structure that contains the IDs of all updated API keys, the IDs of API keys that already had the requested changes and did not require an update, and error details for any failed update. -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-delete-model-snapshot) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-security-bulk-update-api-keys) ```ts -client.ml.deleteModelSnapshot({ job_id, snapshot_id }) +client.security.bulkUpdateApiKeys({ ids }) ``` +### Arguments [_arguments_security.bulk_update_api_keys] + +#### Request (object) [_request_security.bulk_update_api_keys] +- **`ids` (string | string[])**: The API key identifiers. +- **`expiration` (Optional, string | -1 | 0)**: Expiration time for the API keys. +By default, API keys never expire. +This property can be omitted to leave the value unchanged. +- **`metadata` (Optional, Record)**: Arbitrary nested metadata to associate with the API keys. +Within the `metadata` object, top-level keys beginning with an underscore (`_`) are reserved for system usage. +Any information specified with this parameter fully replaces metadata previously associated with the API key. +- **`role_descriptors` (Optional, Record)**: The role descriptors to assign to the API keys. +An API key's effective permissions are an intersection of its assigned privileges and the point-in-time snapshot of permissions of the owner user. +You can assign new privileges by specifying them in this parameter. +To remove assigned privileges, supply the `role_descriptors` parameter as an empty object `{}`. +If an API key has no assigned privileges, it inherits the owner user's full permissions. +The snapshot of the owner's permissions is always updated, whether you supply the `role_descriptors` parameter. +The structure of a role descriptor is the same as the request for the create API keys API. + +## client.security.changePassword [_security.change_password] +Change passwords. -### Arguments [_arguments_261] +Change the passwords of users in the native realm and built-in users. -* **Request (object):** +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-security-change-password) - * **`job_id` (string)**: Identifier for the anomaly detection job. - * **`snapshot_id` (string)**: Identifier for the model snapshot. +```ts +client.security.changePassword({ ... }) +``` +### Arguments [_arguments_security.change_password] +#### Request (object) [_request_security.change_password] +- **`username` (Optional, string)**: The user whose password you want to change. If you do not specify this +parameter, the password is changed for the current user. +- **`password` (Optional, string)**: The new password value. Passwords must be at least 6 characters long. 
+- **`password_hash` (Optional, string)**: A hash of the new password value. This must be produced using the same +hashing algorithm as has been configured for password storage. For more details, +see the explanation of the `xpack.security.authc.password_hashing.algorithm` +setting. +- **`refresh` (Optional, Enum(true | false | "wait_for"))**: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. -### delete_trained_model [_delete_trained_model] +## client.security.clearApiKeyCache [_security.clear_api_key_cache] +Clear the API key cache. -Delete an unreferenced trained model. The request deletes a trained inference model that is not referenced by an ingest pipeline. +Evict a subset of all entries from the API key cache. +The cache is also automatically cleared on state changes of the security index. -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-delete-trained-model) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-security-clear-api-key-cache) ```ts -client.ml.deleteTrainedModel({ model_id }) +client.security.clearApiKeyCache({ ids }) ``` +### Arguments [_arguments_security.clear_api_key_cache] -### Arguments [_arguments_262] +#### Request (object) [_request_security.clear_api_key_cache] +- **`ids` (string | string[])**: List of API key IDs to evict from the API key cache. +To evict all API keys, use `*`. +Does not support other wildcard patterns. -* **Request (object):** - - * **`model_id` (string)**: The unique identifier of the trained model. - * **`force` (Optional, boolean)**: Forcefully deletes a trained model that is referenced by ingest pipelines or has a started deployment. - * **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. - - - -### delete_trained_model_alias [_delete_trained_model_alias] +## client.security.clearCachedPrivileges [_security.clear_cached_privileges] +Clear the privileges cache. -Delete a trained model alias. This API deletes an existing model alias that refers to a trained model. If the model alias is missing or refers to a model other than the one identified by the `model_id`, this API returns an error. +Evict privileges from the native application privilege cache. +The cache is also automatically cleared for applications that have their privileges updated. -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-delete-trained-model-alias) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-security-clear-cached-privileges) ```ts -client.ml.deleteTrainedModelAlias({ model_alias, model_id }) +client.security.clearCachedPrivileges({ application }) ``` +### Arguments [_arguments_security.clear_cached_privileges] -### Arguments [_arguments_263] +#### Request (object) [_request_security.clear_cached_privileges] +- **`application` (string)**: A list of applications. +To clear all applications, use an asterisk (`*`). +It does not support other wildcard patterns. +## client.security.clearCachedRealms [_security.clear_cached_realms] +Clear the user cache. -* **Request (object):** - * **`model_alias` (string)**: The model alias to delete.
- * **`model_id` (string)**: The trained model ID to which the model alias refers. +Evict users from the user cache. +You can completely clear the cache or evict specific users. +User credentials are cached in memory on each node to avoid connecting to a remote authentication service or hitting the disk for every incoming request. +There are realm settings that you can use to configure the user cache. +For more information, refer to the documentation about controlling the user cache. +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-security-clear-cached-realms) -### estimate_model_memory [_estimate_model_memory] +```ts +client.security.clearCachedRealms({ realms }) +``` -Estimate job model memory usage. Makes an estimation of the memory usage for an anomaly detection job model. It is based on analysis configuration details for the job and cardinality estimates for the fields it references. +### Arguments [_arguments_security.clear_cached_realms] -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-ml) +#### Request (object) [_request_security.clear_cached_realms] +- **`realms` (string | string[])**: A list of realms. +To clear all realms, use an asterisk (`*`). +It does not support other wildcard patterns. +- **`usernames` (Optional, string[])**: A list of the users to clear from the cache. +If you do not specify this parameter, the API evicts all users from the user cache. -```ts -client.ml.estimateModelMemory({ ... }) -``` +## client.security.clearCachedRoles [_security.clear_cached_roles] +Clear the roles cache. +Evict roles from the native role cache. -### Arguments [_arguments_264] +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-security-clear-cached-roles) -* **Request (object):** +```ts +client.security.clearCachedRoles({ name }) +``` - * **`analysis_config` (Optional, { bucket_span, categorization_analyzer, categorization_field_name, categorization_filters, detectors, influencers, latency, model_prune_window, multivariate_by_fields, per_partition_categorization, summary_count_field_name })**: For a list of the properties that you can specify in the `analysis_config` component of the body of this API. - * **`max_bucket_cardinality` (Optional, Record)**: Estimates of the highest cardinality in a single bucket that is observed for influencer fields over the time period that the job analyzes data. To produce a good answer, values must be provided for all influencer fields. Providing values for fields that are not listed as `influencers` has no effect on the estimation. - * **`overall_cardinality` (Optional, Record)**: Estimates of the cardinality that is observed for fields over the whole time period that the job analyzes data. To produce a good answer, values must be provided for fields referenced in the `by_field_name`, `over_field_name` and `partition_field_name` of any detectors. Providing values for other fields has no effect on the estimation. It can be omitted from the request if no detectors have a `by_field_name`, `over_field_name` or `partition_field_name`. +### Arguments [_arguments_security.clear_cached_roles] +#### Request (object) [_request_security.clear_cached_roles] +- **`name` (string | string[])**: A list of roles to evict from the role cache. +To evict all roles, use an asterisk (`*`). +It does not support other wildcard patterns. 
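+
+As an illustration, a minimal sketch of evicting roles from the native role cache, assuming `client` is an already-configured Elasticsearch `Client` instance; the role names are hypothetical:
+
+```ts
+// Evict two specific roles from the native role cache
+await client.security.clearCachedRoles({ name: ['my_admin_role', 'my_search_role'] })
+
+// Evict every cached role using the wildcard
+await client.security.clearCachedRoles({ name: '*' })
+```
+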
+## client.security.clearCachedServiceTokens [_security.clear_cached_service_tokens] +Clear service account token caches. -### evaluate_data_frame [_evaluate_data_frame] +Evict a subset of all entries from the service account token caches. +Two separate caches exist for service account tokens: one cache for tokens backed by the `service_tokens` file, and another for tokens backed by the `.security` index. +This API clears matching entries from both caches. -Evaluate data frame analytics. The API packages together commonly used evaluation metrics for various types of machine learning features. This has been designed for use on indexes created by data frame analytics. Evaluation requires both a ground truth field and an analytics result field to be present. +The cache for service account tokens backed by the `.security` index is cleared automatically on state changes of the security index. +The cache for tokens backed by the `service_tokens` file is cleared automatically on file changes. -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-evaluate-data-frame) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-security-clear-cached-service-tokens) ```ts -client.ml.evaluateDataFrame({ evaluation, index }) +client.security.clearCachedServiceTokens({ namespace, service, name }) ``` +### Arguments [_arguments_security.clear_cached_service_tokens] -### Arguments [_arguments_265] +#### Request (object) [_request_security.clear_cached_service_tokens] +- **`namespace` (string)**: The namespace, which is a top-level grouping of service accounts. +- **`service` (string)**: The name of the service, which must be unique within its namespace. +- **`name` (string | string[])**: A list of token names to evict from the service account token caches. +Use a wildcard (`*`) to evict all tokens that belong to a service account. +It does not support other wildcard patterns. -* **Request (object):** +## client.security.createApiKey [_security.create_api_key] +Create an API key. - * **`evaluation` ({ classification, outlier_detection, regression })**: Defines the type of evaluation you want to perform. - * **`index` (string)**: Defines the `index` in which the evaluation will be performed. - * **`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })**: A query clause that retrieves a subset of data from the source index. +Create an API key for access without requiring basic authentication. +IMPORTANT: If the credential that is used to authenticate this request is an API key, the derived API key cannot have any privileges. +If you specify privileges, the API returns an error. +A successful request returns a JSON structure that contains the API key, its unique id, and its name. +If applicable, it also returns expiration information for the API key in milliseconds. 
-### explain_data_frame_analytics [_explain_data_frame_analytics] +NOTE: By default, API keys never expire. You can specify expiration information when you create the API keys. -Explain data frame analytics config. This API provides explanations for a data frame analytics config that either exists already or one that has not been created yet. The following explanations are provided: * which fields are included or not in the analysis and why, * how much memory is estimated to be required. The estimate can be used when deciding the appropriate value for model_memory_limit setting later on. If you have object fields or fields that are excluded via source filtering, they are not included in the explanation. +The API keys are created by the Elasticsearch API key service, which is automatically enabled. +To configure or turn off the API key service, refer to API key service setting documentation. -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-explain-data-frame-analytics) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-security-create-api-key) ```ts -client.ml.explainDataFrameAnalytics({ ... }) +client.security.createApiKey({ ... }) ``` +### Arguments [_arguments_security.create_api_key] -### Arguments [_arguments_266] +#### Request (object) [_request_security.create_api_key] +- **`expiration` (Optional, string | -1 | 0)**: The expiration time for the API key. +By default, API keys never expire. +- **`name` (Optional, string)**: A name for the API key. +- **`role_descriptors` (Optional, Record)**: An array of role descriptors for this API key. +When it is not specified or it is an empty array, the API key will have a point in time snapshot of permissions of the authenticated user. +If you supply role descriptors, the resultant permissions are an intersection of API keys permissions and the authenticated user's permissions thereby limiting the access scope for API keys. +The structure of role descriptor is the same as the request for the create role API. +For more details, refer to the create or update roles API. -* **Request (object):** +NOTE: Due to the way in which this permission intersection is calculated, it is not possible to create an API key that is a child of another API key, unless the derived key is created without any privileges. +In this case, you must explicitly specify a role descriptor with no privileges. +The derived API key can be used for authentication; it will not have authority to call Elasticsearch APIs. +- **`metadata` (Optional, Record)**: Arbitrary metadata that you want to associate with the API key. It supports nested data structure. Within the metadata object, keys beginning with `_` are reserved for system usage. +- **`refresh` (Optional, Enum(true | false | "wait_for"))**: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. - * **`id` (Optional, string)**: Identifier for the data frame analytics job. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. It must start and end with alphanumeric characters. - * **`source` (Optional, { index, query, runtime_mappings, _source })**: The configuration of how to source the analysis data. It requires an index. Optionally, query and _source may be specified. 
- * **`dest` (Optional, { index, results_field })**: The destination configuration, consisting of index and optionally results_field (ml by default). - * **`analysis` (Optional, { classification, outlier_detection, regression })**: The analysis configuration, which contains the information necessary to perform one of the following types of analysis: classification, outlier detection, or regression. - * **`description` (Optional, string)**: A description of the job. - * **`model_memory_limit` (Optional, string)**: The approximate maximum amount of memory resources that are permitted for analytical processing. If your `elasticsearch.yml` file contains an `xpack.ml.max_model_memory_limit` setting, an error occurs when you try to create data frame analytics jobs that have `model_memory_limit` values greater than that setting. - * **`max_num_threads` (Optional, number)**: The maximum number of threads to be used by the analysis. Using more threads may decrease the time necessary to complete the analysis at the cost of using more CPU. Note that the process may use additional threads for operational functionality other than the analysis itself. - * **`analyzed_fields` (Optional, { includes, excludes })**: Specify includes and/or excludes patterns to select which fields will be included in the analysis. The patterns specified in excludes are applied last, therefore excludes takes precedence. In other words, if the same field is specified in both includes and excludes, then the field will not be included in the analysis. - * **`allow_lazy_start` (Optional, boolean)**: Specifies whether this job can start when there is insufficient machine learning node capacity for it to be immediately assigned to a node. +## client.security.createCrossClusterApiKey [_security.create_cross_cluster_api_key] +Create a cross-cluster API key. +Create an API key of the `cross_cluster` type for the API key based remote cluster access. +A `cross_cluster` API key cannot be used to authenticate through the REST interface. +IMPORTANT: To authenticate this request you must use a credential that is not an API key. Even if you use an API key that has the required privilege, the API returns an error. -### flush_job [_flush_job] +Cross-cluster API keys are created by the Elasticsearch API key service, which is automatically enabled. -Force buffered data to be processed. The flush jobs API is only applicable when sending data for analysis using the post data API. Depending on the content of the buffer, then it might additionally calculate new results. Both flush and close operations are similar, however the flush is more efficient if you are expecting to send more data for analysis. When flushing, the job remains open and is available to continue analyzing data. A close operation additionally prunes and persists the model state to disk and the job must be opened again before analyzing further data. +NOTE: Unlike REST API keys, a cross-cluster API key does not capture permissions of the authenticated user. The API key’s effective permission is exactly as specified with the `access` property. -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-flush-job) +A successful request returns a JSON structure that contains the API key, its unique ID, and its name. If applicable, it also returns expiration information for the API key in milliseconds. -```ts -client.ml.flushJob({ job_id }) -``` +By default, API keys never expire. You can specify expiration information when you create the API keys. 
+Cross-cluster API keys can only be updated with the update cross-cluster API key API. +Attempting to update them with the update REST API key API or the bulk update REST API keys API will result in an error. -### Arguments [_arguments_267] +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-security-create-cross-cluster-api-key) -* **Request (object):** +```ts +client.security.createCrossClusterApiKey({ access, name }) +``` - * **`job_id` (string)**: Identifier for the anomaly detection job. - * **`advance_time` (Optional, string | Unit)**: Refer to the description for the `advance_time` query parameter. - * **`calc_interim` (Optional, boolean)**: Refer to the description for the `calc_interim` query parameter. - * **`end` (Optional, string | Unit)**: Refer to the description for the `end` query parameter. - * **`skip_time` (Optional, string | Unit)**: Refer to the description for the `skip_time` query parameter. - * **`start` (Optional, string | Unit)**: Refer to the description for the `start` query parameter. +### Arguments [_arguments_security.create_cross_cluster_api_key] +#### Request (object) [_request_security.create_cross_cluster_api_key] +- **`access` ({ replication, search })**: The access to be granted to this API key. +The access is composed of permissions for cross-cluster search and cross-cluster replication. +At least one of them must be specified. +NOTE: No explicit privileges should be specified for either search or replication access. +The creation process automatically converts the access specification to a role descriptor which has relevant privileges assigned accordingly. +- **`name` (string)**: Specifies the name for this API key. +- **`expiration` (Optional, string | -1 | 0)**: Expiration time for the API key. +By default, API keys never expire. +- **`metadata` (Optional, Record)**: Arbitrary metadata that you want to associate with the API key. +It supports nested data structure. +Within the metadata object, keys beginning with `_` are reserved for system usage. -### forecast [_forecast] +## client.security.createServiceToken [_security.create_service_token] +Create a service account token. -Predict future behavior of a time series. +Create a service accounts token for access without requiring basic authentication. -Forecasts are not supported for jobs that perform population analysis; an error occurs if you try to create a forecast for a job that has an `over_field_name` in its configuration. Forcasts predict future behavior based on historical data. +NOTE: Service account tokens never expire. +You must actively delete them if they are no longer needed. -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-forecast) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-security-create-service-token) ```ts -client.ml.forecast({ job_id }) +client.security.createServiceToken({ namespace, service }) ``` +### Arguments [_arguments_security.create_service_token] -### Arguments [_arguments_268] +#### Request (object) [_request_security.create_service_token] +- **`namespace` (string)**: The name of the namespace, which is a top-level grouping of service accounts. +- **`service` (string)**: The name of the service. +- **`name` (Optional, string)**: The name for the service account token. +If omitted, a random name will be generated. -* **Request (object):** +Token names must be at least one and no more than 256 characters. 
+They can contain alphanumeric characters (a-z, A-Z, 0-9), dashes (`-`), and underscores (`_`), but cannot begin with an underscore. - * **`job_id` (string)**: Identifier for the anomaly detection job. The job must be open when you create a forecast; otherwise, an error occurs. - * **`duration` (Optional, string | -1 | 0)**: Refer to the description for the `duration` query parameter. - * **`expires_in` (Optional, string | -1 | 0)**: Refer to the description for the `expires_in` query parameter. - * **`max_model_memory` (Optional, string)**: Refer to the description for the `max_model_memory` query parameter. +NOTE: Token names must be unique in the context of the associated service account. +They must also be globally unique with their fully qualified names, which are comprised of the service account principal and token name, such as `//`. +- **`refresh` (Optional, Enum(true | false | "wait_for"))**: If `true` then refresh the affected shards to make this operation visible to search, if `wait_for` (the default) then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. +## client.security.delegatePki [_security.delegate_pki] +Delegate PKI authentication. +This API implements the exchange of an X509Certificate chain for an Elasticsearch access token. +The certificate chain is validated, according to RFC 5280, by sequentially considering the trust configuration of every installed PKI realm that has `delegation.enabled` set to `true`. +A successfully trusted client certificate is also subject to the validation of the subject distinguished name according to the `username_pattern` of the respective realm. -### get_buckets [_get_buckets] -Get anomaly detection job results for buckets. The API presents a chronological view of the records, grouped by bucket. +This API is called by smart and trusted proxies, such as Kibana, which terminate the user's TLS session but still want to authenticate the user by using a PKI realm, as if the user connected directly to Elasticsearch. -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-buckets) +IMPORTANT: The association between the subject public key in the target certificate and the corresponding private key is not validated. +This is part of the TLS authentication process and it is delegated to the proxy that calls this API. +The proxy is trusted to have performed the TLS authentication and this API translates that authentication into an Elasticsearch access token. +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-security-delegate-pki) ```ts -client.ml.getBuckets({ job_id }) +client.security.delegatePki({ x509_certificate_chain }) ``` +### Arguments [_arguments_security.delegate_pki] -### Arguments [_arguments_269] -* **Request (object):** - - * **`job_id` (string)**: Identifier for the anomaly detection job. - * **`timestamp` (Optional, string | Unit)**: The timestamp of a single bucket result. If you do not specify this parameter, the API returns information about all buckets. - * **`anomaly_score` (Optional, number)**: Refer to the description for the `anomaly_score` query parameter. - * **`desc` (Optional, boolean)**: Refer to the description for the `desc` query parameter. - * **`end` (Optional, string | Unit)**: Refer to the description for the `end` query parameter. - * **`exclude_interim` (Optional, boolean)**: Refer to the description for the `exclude_interim` query parameter.
- * **`expand` (Optional, boolean)**: Refer to the description for the `expand` query parameter. - * **`page` (Optional, { from, size })** - * **`sort` (Optional, string)**: Refer to the desription for the `sort` query parameter. - * **`start` (Optional, string | Unit)**: Refer to the description for the `start` query parameter. - * **`from` (Optional, number)**: Skips the specified number of buckets. - * **`size` (Optional, number)**: Specifies the maximum number of buckets to obtain. +#### Request (object) [_request_security.delegate_pki] +- **`x509_certificate_chain` (string[])**: The X509Certificate chain, which is represented as an ordered string array. +Each string in the array is a base64-encoded (Section 4 of RFC4648 - not base64url-encoded) of the certificate's DER encoding. +The first element is the target certificate that contains the subject distinguished name that is requesting access. +This may be followed by additional certificates; each subsequent certificate is used to certify the previous one. +## client.security.deletePrivileges [_security.delete_privileges] +Delete application privileges. -### get_calendar_events [_get_calendar_events] +To use this API, you must have one of the following privileges: -Get info about events in calendars. +* The `manage_security` cluster privilege (or a greater privilege such as `all`). +* The "Manage Application Privileges" global privilege for the application being referenced in the request. -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-calendar-events) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-security-delete-privileges) ```ts -client.ml.getCalendarEvents({ calendar_id }) +client.security.deletePrivileges({ application, name }) ``` +### Arguments [_arguments_security.delete_privileges] -### Arguments [_arguments_270] - -* **Request (object):** - - * **`calendar_id` (string)**: A string that uniquely identifies a calendar. You can get information for multiple calendars by using a list of ids or a wildcard expression. You can get information for all calendars by using `_all` or `*` or by omitting the calendar identifier. - * **`end` (Optional, string | Unit)**: Specifies to get events with timestamps earlier than this time. - * **`from` (Optional, number)**: Skips the specified number of events. - * **`job_id` (Optional, string)**: Specifies to get events for a specific anomaly detection job identifier or job group. It must be used with a calendar identifier of `_all` or `*`. - * **`size` (Optional, number)**: Specifies the maximum number of events to obtain. - * **`start` (Optional, string | Unit)**: Specifies to get events with timestamps after this time. +#### Request (object) [_request_security.delete_privileges] +- **`application` (string)**: The name of the application. +Application privileges are always associated with exactly one application. +- **`name` (string | string[])**: The name of the privilege. +- **`refresh` (Optional, Enum(true | false | "wait_for"))**: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. +## client.security.deleteRole [_security.delete_role] +Delete roles. +Delete roles in the native realm. +The role management APIs are generally the preferred way to manage roles, rather than using file-based role management. 
+The delete roles API cannot remove roles that are defined in roles files. -### get_calendars [_get_calendars] - -Get calendar configuration info. - -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-calendars) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-security-delete-role) ```ts -client.ml.getCalendars({ ... }) +client.security.deleteRole({ name }) ``` +### Arguments [_arguments_security.delete_role] -### Arguments [_arguments_271] - -* **Request (object):** - - * **`calendar_id` (Optional, string)**: A string that uniquely identifies a calendar. You can get information for multiple calendars by using a list of ids or a wildcard expression. You can get information for all calendars by using `_all` or `*` or by omitting the calendar identifier. - * **`page` (Optional, { from, size })**: This object is supported only when you omit the calendar identifier. - * **`from` (Optional, number)**: Skips the specified number of calendars. This parameter is supported only when you omit the calendar identifier. - * **`size` (Optional, number)**: Specifies the maximum number of calendars to obtain. This parameter is supported only when you omit the calendar identifier. - +#### Request (object) [_request_security.delete_role] +- **`name` (string)**: The name of the role. +- **`refresh` (Optional, Enum(true | false | "wait_for"))**: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. +## client.security.deleteRoleMapping [_security.delete_role_mapping] +Delete role mappings. -### get_categories [_get_categories] - -Get anomaly detection job results for categories. +Role mappings define which roles are assigned to each user. +The role mapping APIs are generally the preferred way to manage role mappings rather than using role mapping files. +The delete role mappings API cannot remove role mappings that are defined in role mapping files. -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-categories) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-security-delete-role-mapping) ```ts -client.ml.getCategories({ job_id }) +client.security.deleteRoleMapping({ name }) ``` +### Arguments [_arguments_security.delete_role_mapping] -### Arguments [_arguments_272] - -* **Request (object):** - - * **`job_id` (string)**: Identifier for the anomaly detection job. - * **`category_id` (Optional, string)**: Identifier for the category, which is unique in the job. If you specify neither the category ID nor the partition_field_value, the API returns information about all categories. If you specify only the partition_field_value, it returns information about all categories for the specified partition. - * **`page` (Optional, { from, size })**: Configures pagination. This parameter has the `from` and `size` properties. - * **`from` (Optional, number)**: Skips the specified number of categories. - * **`partition_field_value` (Optional, string)**: Only return categories for the specified partition. - * **`size` (Optional, number)**: Specifies the maximum number of categories to obtain. - +#### Request (object) [_request_security.delete_role_mapping] +- **`name` (string)**: The distinct name that identifies the role mapping. 
+The name is used solely as an identifier to facilitate interaction via the API; it does not affect the behavior of the mapping in any way. +- **`refresh` (Optional, Enum(true | false | "wait_for"))**: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. +## client.security.deleteServiceToken [_security.delete_service_token] +Delete service account tokens. -### get_data_frame_analytics [_get_data_frame_analytics] - -Get data frame analytics job configuration info. You can get information for multiple data frame analytics jobs in a single API request by using a list of data frame analytics jobs or a wildcard expression. +Delete service account tokens for a service in a specified namespace. -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-data-frame-analytics) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-security-delete-service-token) ```ts -client.ml.getDataFrameAnalytics({ ... }) +client.security.deleteServiceToken({ namespace, service, name }) ``` +### Arguments [_arguments_security.delete_service_token] -### Arguments [_arguments_273] - -* **Request (object):** - - * **`id` (Optional, string)**: Identifier for the data frame analytics job. If you do not specify this option, the API returns information for the first hundred data frame analytics jobs. - * **`allow_no_match` (Optional, boolean)**: Specifies what to do when the request: - - 1. Contains wildcard expressions and there are no data frame analytics jobs that match. - 2. Contains the `_all` string or no identifiers and there are no matches. - 3. Contains wildcard expressions and there are only partial matches. +#### Request (object) [_request_security.delete_service_token] +- **`namespace` (string)**: The namespace, which is a top-level grouping of service accounts. +- **`service` (string)**: The service name. +- **`name` (string)**: The name of the service account token. +- **`refresh` (Optional, Enum(true | false | "wait_for"))**: If `true` then refresh the affected shards to make this operation visible to search, if `wait_for` (the default) then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. +## client.security.deleteUser [_security.delete_user] +Delete users. -The default value returns an empty data_frame_analytics array when there are no matches and the subset of results when there are partial matches. If this parameter is `false`, the request returns a 404 status code when there are no matches or only partial matches. ** *`from` (Optional, number)**: Skips the specified number of data frame analytics jobs. *** *`size` (Optional, number)**: Specifies the maximum number of data frame analytics jobs to obtain. ** *`exclude_generated` (Optional, boolean)**: Indicates if certain fields should be removed from the configuration on retrieval. This allows the configuration to be in an acceptable format to be retrieved and then added to another cluster. - - -### get_data_frame_analytics_stats [_get_data_frame_analytics_stats] - -Get data frame analytics jobs usage info. +Delete users from the native realm. 
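These delete APIs all follow the same call shape and accept the same `refresh` behavior. A minimal sketch of removing a role and a user, assuming an already-configured `Client` instance and the hypothetical names `my-role` and `jacknich`:

```ts
// Assumes: import { Client } from '@elastic/elasticsearch'
// and:     const client = new Client({ node: '...', auth: { ... } })

// Remove a native-realm role; 'wait_for' delays the response until the change is searchable.
await client.security.deleteRole({ name: 'my-role', refresh: 'wait_for' })

// Remove a native-realm user.
await client.security.deleteUser({ username: 'jacknich' })
```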
-[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-data-frame-analytics-stats) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-security-delete-user) ```ts -client.ml.getDataFrameAnalyticsStats({ ... }) +client.security.deleteUser({ username }) ``` +### Arguments [_arguments_security.delete_user] -### Arguments [_arguments_274] - -* **Request (object):** +#### Request (object) [_request_security.delete_user] +- **`username` (string)**: An identifier for the user. +- **`refresh` (Optional, Enum(true | false | "wait_for"))**: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. - * **`id` (Optional, string)**: Identifier for the data frame analytics job. If you do not specify this option, the API returns information for the first hundred data frame analytics jobs. - * **`allow_no_match` (Optional, boolean)**: Specifies what to do when the request: - - 1. Contains wildcard expressions and there are no data frame analytics jobs that match. - 2. Contains the `_all` string or no identifiers and there are no matches. - 3. Contains wildcard expressions and there are only partial matches. - - -The default value returns an empty data_frame_analytics array when there are no matches and the subset of results when there are partial matches. If this parameter is `false`, the request returns a 404 status code when there are no matches or only partial matches. ** *`from` (Optional, number)**: Skips the specified number of data frame analytics jobs. *** *`size` (Optional, number)**: Specifies the maximum number of data frame analytics jobs to obtain. ** *`verbose` (Optional, boolean)**: Defines whether the stats response should be verbose. - - -### get_datafeed_stats [_get_datafeed_stats] +## client.security.disableUser [_security.disable_user] +Disable users. -Get datafeeds usage info. You can get statistics for multiple datafeeds in a single API request by using a list of datafeeds or a wildcard expression. You can get statistics for all datafeeds by using `_all`, by specifying `*` as the ``, or by omitting the ``. If the datafeed is stopped, the only information you receive is the `datafeed_id` and the `state`. This API returns a maximum of 10,000 datafeeds. +Disable users in the native realm. +By default, when you create users, they are enabled. +You can use this API to revoke a user's access to Elasticsearch. -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-datafeed-stats) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-security-disable-user) ```ts -client.ml.getDatafeedStats({ ... }) +client.security.disableUser({ username }) ``` +### Arguments [_arguments_security.disable_user] -### Arguments [_arguments_275] +#### Request (object) [_request_security.disable_user] +- **`username` (string)**: An identifier for the user. +- **`refresh` (Optional, Enum(true | false | "wait_for"))**: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. -* **Request (object):** - - * **`datafeed_id` (Optional, string | string[])**: Identifier for the datafeed. 
It can be a datafeed identifier or a wildcard expression. If you do not specify one of these options, the API returns information about all datafeeds. - * **`allow_no_match` (Optional, boolean)**: Specifies what to do when the request: - - 1. Contains wildcard expressions and there are no datafeeds that match. - 2. Contains the `_all` string or no identifiers and there are no matches. - 3. Contains wildcard expressions and there are only partial matches. - - -The default value is `true`, which returns an empty `datafeeds` array when there are no matches and the subset of results when there are partial matches. If this parameter is `false`, the request returns a `404` status code when there are no matches or only partial matches. +## client.security.disableUserProfile [_security.disable_user_profile] +Disable a user profile. +Disable user profiles so that they are not visible in user profile searches. -### get_datafeeds [_get_datafeeds] +NOTE: The user profile feature is designed only for use by Kibana and Elastic's Observability, Enterprise Search, and Elastic Security solutions. +Individual users and external applications should not call this API directly. +Elastic reserves the right to change or remove this feature in future releases without prior notice. -Get datafeeds configuration info. You can get information for multiple datafeeds in a single API request by using a list of datafeeds or a wildcard expression. You can get information for all datafeeds by using `_all`, by specifying `*` as the ``, or by omitting the ``. This API returns a maximum of 10,000 datafeeds. +When you activate a user profile, it's automatically enabled and visible in user profile searches. You can use the disable user profile API to disable a user profile so it’s not visible in these searches. +To re-enable a disabled user profile, use the enable user profile API. -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-datafeeds) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-security-disable-user-profile) ```ts -client.ml.getDatafeeds({ ... }) +client.security.disableUserProfile({ uid }) ``` +### Arguments [_arguments_security.disable_user_profile] -### Arguments [_arguments_276] +#### Request (object) [_request_security.disable_user_profile] +- **`uid` (string)**: Unique identifier for the user profile. +- **`refresh` (Optional, Enum(true | false | "wait_for"))**: If 'true', Elasticsearch refreshes the affected shards to make this operation visible to search. +If 'wait_for', it waits for a refresh to make this operation visible to search. +If 'false', it does nothing with refreshes. -* **Request (object):** - - * **`datafeed_id` (Optional, string | string[])**: Identifier for the datafeed. It can be a datafeed identifier or a wildcard expression. If you do not specify one of these options, the API returns information about all datafeeds. - * **`allow_no_match` (Optional, boolean)**: Specifies what to do when the request: - - 1. Contains wildcard expressions and there are no datafeeds that match. - 2. Contains the `_all` string or no identifiers and there are no matches. - 3. Contains wildcard expressions and there are only partial matches. - - -The default value is `true`, which returns an empty `datafeeds` array when there are no matches and the subset of results when there are partial matches. 
If this parameter is `false`, the request returns a `404` status code when there are no matches or only partial matches. *** *`exclude_generated` (Optional, boolean)**: Indicates if certain fields should be removed from the configuration on retrieval. This allows the configuration to be in an acceptable format to be retrieved and then added to another cluster. - - -### get_filters [_get_filters] +## client.security.enableUser [_security.enable_user] +Enable users. -Get filters. You can get a single filter or all filters. +Enable users in the native realm. +By default, when you create users, they are enabled. -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-filters) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-security-enable-user) ```ts -client.ml.getFilters({ ... }) +client.security.enableUser({ username }) ``` +### Arguments [_arguments_security.enable_user] -### Arguments [_arguments_277] - -* **Request (object):** - - * **`filter_id` (Optional, string | string[])**: A string that uniquely identifies a filter. - * **`from` (Optional, number)**: Skips the specified number of filters. - * **`size` (Optional, number)**: Specifies the maximum number of filters to obtain. +#### Request (object) [_request_security.enable_user] +- **`username` (string)**: An identifier for the user. +- **`refresh` (Optional, Enum(true | false | "wait_for"))**: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. +## client.security.enableUserProfile [_security.enable_user_profile] +Enable a user profile. +Enable user profiles to make them visible in user profile searches. -### get_influencers [_get_influencers] +NOTE: The user profile feature is designed only for use by Kibana and Elastic's Observability, Enterprise Search, and Elastic Security solutions. +Individual users and external applications should not call this API directly. +Elastic reserves the right to change or remove this feature in future releases without prior notice. -Get anomaly detection job results for influencers. Influencers are the entities that have contributed to, or are to blame for, the anomalies. Influencer results are available only if an `influencer_field_name` is specified in the job configuration. +When you activate a user profile, it's automatically enabled and visible in user profile searches. +If you later disable the user profile, you can use the enable user profile API to make the profile visible in these searches again. -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-influencers) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-security-enable-user-profile) ```ts -client.ml.getInfluencers({ job_id }) +client.security.enableUserProfile({ uid }) ``` +### Arguments [_arguments_security.enable_user_profile] -### Arguments [_arguments_278] - -* **Request (object):** - - * **`job_id` (string)**: Identifier for the anomaly detection job. - * **`page` (Optional, { from, size })**: Configures pagination. This parameter has the `from` and `size` properties. - * **`desc` (Optional, boolean)**: If true, the results are sorted in descending order. - * **`end` (Optional, string | Unit)**: Returns influencers with timestamps earlier than this time. 
The default value means it is unset and results are not limited to specific timestamps. - * **`exclude_interim` (Optional, boolean)**: If true, the output excludes interim results. By default, interim results are included. - * **`influencer_score` (Optional, number)**: Returns influencers with anomaly scores greater than or equal to this value. - * **`from` (Optional, number)**: Skips the specified number of influencers. - * **`size` (Optional, number)**: Specifies the maximum number of influencers to obtain. - * **`sort` (Optional, string)**: Specifies the sort field for the requested influencers. By default, the influencers are sorted by the `influencer_score` value. - * **`start` (Optional, string | Unit)**: Returns influencers with timestamps after this time. The default value means it is unset and results are not limited to specific timestamps. - +#### Request (object) [_request_security.enable_user_profile] +- **`uid` (string)**: A unique identifier for the user profile. +- **`refresh` (Optional, Enum(true | false | "wait_for"))**: If 'true', Elasticsearch refreshes the affected shards to make this operation +visible to search. +If 'wait_for', it waits for a refresh to make this operation visible to search. +If 'false', nothing is done with refreshes. +## client.security.enrollKibana [_security.enroll_kibana] +Enroll Kibana. -### get_job_stats [_get_job_stats] +Enable a Kibana instance to configure itself for communication with a secured Elasticsearch cluster. -Get anomaly detection jobs usage info. +NOTE: This API is currently intended for internal use only by Kibana. +Kibana uses this API internally to configure itself for communications with an Elasticsearch cluster that already has security features enabled. -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-job-stats) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-security-enroll-kibana) ```ts -client.ml.getJobStats({ ... }) +client.security.enrollKibana() ``` -### Arguments [_arguments_279] - -* **Request (object):** - - * **`job_id` (Optional, string)**: Identifier for the anomaly detection job. It can be a job identifier, a group name, a list of jobs, or a wildcard expression. If you do not specify one of these options, the API returns information for all anomaly detection jobs. - * **`allow_no_match` (Optional, boolean)**: Specifies what to do when the request: - - 1. Contains wildcard expressions and there are no jobs that match. - 2. Contains the _all string or no identifiers and there are no matches. - 3. Contains wildcard expressions and there are only partial matches. - - -If `true`, the API returns an empty `jobs` array when there are no matches and the subset of results when there are partial matches. If `false`, the API returns a `404` status code when there are no matches or only partial matches. - +## client.security.enrollNode [_security.enroll_node] +Enroll a node. -### get_jobs [_get_jobs] +Enroll a new node to allow it to join an existing cluster with security features enabled. -Get anomaly detection jobs configuration info. You can get information for multiple anomaly detection jobs in a single API request by using a group name, a list of jobs, or a wildcard expression. You can get information for all anomaly detection jobs by using `_all`, by specifying `*` as the ``, or by omitting the ``. 
+The response contains all the necessary information for the joining node to bootstrap discovery and security related settings so that it can successfully join the cluster. +The response contains key and certificate material that allows the caller to generate valid signed certificates for the HTTP layer of all nodes in the cluster. -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-jobs) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-security-enroll-node) ```ts -client.ml.getJobs({ ... }) +client.security.enrollNode() ``` -### Arguments [_arguments_280] - -* **Request (object):** - - * **`job_id` (Optional, string | string[])**: Identifier for the anomaly detection job. It can be a job identifier, a group name, or a wildcard expression. If you do not specify one of these options, the API returns information for all anomaly detection jobs. - * **`allow_no_match` (Optional, boolean)**: Specifies what to do when the request: - - 1. Contains wildcard expressions and there are no jobs that match. - 2. Contains the _all string or no identifiers and there are no matches. - 3. Contains wildcard expressions and there are only partial matches. - - -The default value is `true`, which returns an empty `jobs` array when there are no matches and the subset of results when there are partial matches. If this parameter is `false`, the request returns a `404` status code when there are no matches or only partial matches. *** *`exclude_generated` (Optional, boolean)**: Indicates if certain fields should be removed from the configuration on retrieval. This allows the configuration to be in an acceptable format to be retrieved and then added to another cluster. - - -### get_memory_stats [_get_memory_stats] +## client.security.getApiKey [_security.get_api_key] +Get API key information. -Get machine learning memory usage info. Get information about how machine learning jobs and trained models are using memory, on each node, both within the JVM heap, and natively, outside of the JVM. +Retrieves information for one or more API keys. +NOTE: If you have only the `manage_own_api_key` privilege, this API returns only the API keys that you own. +If you have `read_security`, `manage_api_key` or greater privileges (including `manage_security`), this API returns all API keys regardless of ownership. -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-memory-stats) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-security-get-api-key) ```ts -client.ml.getMemoryStats({ ... }) +client.security.getApiKey({ ... }) ``` +### Arguments [_arguments_security.get_api_key] + +#### Request (object) [_request_security.get_api_key] +- **`id` (Optional, string)**: An API key id. +This parameter cannot be used with any of `name`, `realm_name` or `username`. +- **`name` (Optional, string)**: An API key name. +This parameter cannot be used with any of `id`, `realm_name` or `username`. +It supports prefix search with wildcard. +- **`owner` (Optional, boolean)**: A boolean flag that can be used to query API keys owned by the currently authenticated user. +The `realm_name` or `username` parameters cannot be specified when this parameter is set to `true` as they are assumed to be the currently authenticated ones. +- **`realm_name` (Optional, string)**: The name of an authentication realm. 
+This parameter cannot be used with either `id` or `name` or when `owner` flag is set to `true`. +- **`username` (Optional, string)**: The username of a user. +This parameter cannot be used with either `id` or `name` or when `owner` flag is set to `true`. +- **`with_limited_by` (Optional, boolean)**: Return the snapshot of the owner user's role descriptors +associated with the API key. An API key's actual +permission is the intersection of its assigned role +descriptors and the owner user's role descriptors. +- **`active_only` (Optional, boolean)**: A boolean flag that can be used to query API keys that are currently active. An API key is considered active if it is neither invalidated, nor expired at query time. You can specify this together with other parameters such as `owner` or `name`. If `active_only` is false, the response will include both active and inactive (expired or invalidated) keys. +- **`with_profile_uid` (Optional, boolean)**: Determines whether to also retrieve the profile uid, for the API key owner principal, if it exists. + +## client.security.getBuiltinPrivileges [_security.get_builtin_privileges] +Get builtin privileges. -### Arguments [_arguments_281] - -* **Request (object):** - - * **`node_id` (Optional, string)**: The names of particular nodes in the cluster to target. For example, `nodeId1,nodeId2` or `ml:true` - * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. - * **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. - - - -### get_model_snapshot_upgrade_stats [_get_model_snapshot_upgrade_stats] - -Get anomaly detection job model snapshot upgrade usage info. +Get the list of cluster privileges and index privileges that are available in this version of Elasticsearch. -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-model-snapshot-upgrade-stats) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-security-get-builtin-privileges) ```ts -client.ml.getModelSnapshotUpgradeStats({ job_id, snapshot_id }) +client.security.getBuiltinPrivileges() ``` -### Arguments [_arguments_282] - -* **Request (object):** - - * **`job_id` (string)**: Identifier for the anomaly detection job. - * **`snapshot_id` (string)**: A numerical character string that uniquely identifies the model snapshot. You can get information for multiple snapshots by using a list or a wildcard expression. You can get all snapshots by using `_all`, by specifying `*` as the snapshot ID, or by omitting the snapshot ID. - * **`allow_no_match` (Optional, boolean)**: Specifies what to do when the request: - - * Contains wildcard expressions and there are no jobs that match. - * Contains the _all string or no identifiers and there are no matches. - * Contains wildcard expressions and there are only partial matches. - - -The default value is true, which returns an empty jobs array when there are no matches and the subset of results when there are partial matches. If this parameter is false, the request returns a 404 status code when there are no matches or only partial matches. - +## client.security.getPrivileges [_security.get_privileges] +Get application privileges. 
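A minimal sketch of retrieving the privileges registered for a single application, assuming an already-configured `client` and the hypothetical application name `myapp`:

```ts
// Fetch every privilege defined for one application ('myapp' is a placeholder).
const privileges = await client.security.getPrivileges({ application: 'myapp' })
console.log(privileges)
```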
-### get_model_snapshots [_get_model_snapshots] +To use this API, you must have one of the following privileges: -Get model snapshots info. +* The `read_security` cluster privilege (or a greater privilege such as `manage_security` or `all`). +* The "Manage Application Privileges" global privilege for the application being referenced in the request. -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-model-snapshots) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-security-get-privileges) ```ts -client.ml.getModelSnapshots({ job_id }) +client.security.getPrivileges({ ... }) ``` +### Arguments [_arguments_security.get_privileges] -### Arguments [_arguments_283] +#### Request (object) [_request_security.get_privileges] +- **`application` (Optional, string)**: The name of the application. +Application privileges are always associated with exactly one application. +If you do not specify this parameter, the API returns information about all privileges for all applications. +- **`name` (Optional, string | string[])**: The name of the privilege. +If you do not specify this parameter, the API returns information about all privileges for the requested application. -* **Request (object):** +## client.security.getRole [_security.get_role] +Get roles. - * **`job_id` (string)**: Identifier for the anomaly detection job. - * **`snapshot_id` (Optional, string)**: A numerical character string that uniquely identifies the model snapshot. You can get information for multiple snapshots by using a list or a wildcard expression. You can get all snapshots by using `_all`, by specifying `*` as the snapshot ID, or by omitting the snapshot ID. - * **`desc` (Optional, boolean)**: Refer to the description for the `desc` query parameter. - * **`end` (Optional, string | Unit)**: Refer to the description for the `end` query parameter. - * **`page` (Optional, { from, size })** - * **`sort` (Optional, string)**: Refer to the description for the `sort` query parameter. - * **`start` (Optional, string | Unit)**: Refer to the description for the `start` query parameter. - * **`from` (Optional, number)**: Skips the specified number of snapshots. - * **`size` (Optional, number)**: Specifies the maximum number of snapshots to obtain. +Get roles in the native realm. +The role management APIs are generally the preferred way to manage roles, rather than using file-based role management. +The get roles API cannot retrieve roles that are defined in roles files. +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-security-get-role) +```ts +client.security.getRole({ ... }) +``` -### get_overall_buckets [_get_overall_buckets] +### Arguments [_arguments_security.get_role] -Get overall bucket results. +#### Request (object) [_request_security.get_role] +- **`name` (Optional, string | string[])**: The name of the role. +You can specify multiple roles as a list. +If you do not specify this parameter, the API returns information about all roles. -Retrievs overall bucket results that summarize the bucket results of multiple anomaly detection jobs. +## client.security.getRoleMapping [_security.get_role_mapping] +Get role mappings. -The `overall_score` is calculated by combining the scores of all the buckets within the overall bucket span. First, the maximum `anomaly_score` per anomaly detection job in the overall bucket is calculated. 
Then the `top_n` of those scores are averaged to result in the `overall_score`. This means that you can fine-tune the `overall_score` so that it is more or less sensitive to the number of jobs that detect an anomaly at the same time. For example, if you set `top_n` to `1`, the `overall_score` is the maximum bucket score in the overall bucket. Alternatively, if you set `top_n` to the number of jobs, the `overall_score` is high only when all jobs detect anomalies in that overall bucket. If you set the `bucket_span` parameter (to a value greater than its default), the `overall_score` is the maximum `overall_score` of the overall buckets that have a span equal to the jobs' largest bucket span. +Role mappings define which roles are assigned to each user. +The role mapping APIs are generally the preferred way to manage role mappings rather than using role mapping files. +The get role mappings API cannot retrieve role mappings that are defined in role mapping files. -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-overall-buckets) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-security-get-role-mapping) ```ts -client.ml.getOverallBuckets({ job_id }) +client.security.getRoleMapping({ ... }) ``` +### Arguments [_arguments_security.get_role_mapping] -### Arguments [_arguments_284] - -* **Request (object):** - - * **`job_id` (string)**: Identifier for the anomaly detection job. It can be a job identifier, a group name, a list of jobs or groups, or a wildcard expression. - - -You can summarize the bucket results for all anomaly detection jobs by using `_all` or by specifying `*` as the ``. ** *`allow_no_match` (Optional, boolean)**: Refer to the description for the `allow_no_match` query parameter. *** *`bucket_span` (Optional, string | -1 | 0)**: Refer to the description for the `bucket_span` query parameter. *** *`end` (Optional, string | Unit)**: Refer to the description for the `end` query parameter. *** *`exclude_interim` (Optional, boolean)**: Refer to the description for the `exclude_interim` query parameter. *** *`overall_score` (Optional, number | string)**: Refer to the description for the `overall_score` query parameter. *** *`start` (Optional, string | Unit)**: Refer to the description for the `start` query parameter. ** *`top_n` (Optional, number)**: Refer to the description for the `top_n` query parameter. +#### Request (object) [_request_security.get_role_mapping] +- **`name` (Optional, string | string[])**: The distinct name that identifies the role mapping. The name is used solely as an identifier to facilitate interaction via the API; it does not affect the behavior of the mapping in any way. You can specify multiple mapping names as a list. If you do not specify this parameter, the API returns information about all role mappings. +## client.security.getServiceAccounts [_security.get_service_accounts] +Get service accounts. -### get_records [_get_records] +Get a list of service accounts that match the provided path parameters. -Get anomaly records for an anomaly detection job. Records contain the detailed analytical results. They describe the anomalous activity that has been identified in the input data based on the detector configuration. There can be many anomaly records depending on the characteristics and size of the input data. In practice, there are often too many to be able to manually process them. 
The machine learning features therefore perform a sophisticated aggregation of the anomaly records into buckets. The number of record results depends on the number of anomalies found in each bucket, which relates to the number of time series being modeled and the number of detectors. +NOTE: Currently, only the `elastic/fleet-server` service account is available. -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-records) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-security-get-service-accounts) ```ts -client.ml.getRecords({ job_id }) +client.security.getServiceAccounts({ ... }) ``` +### Arguments [_arguments_security.get_service_accounts] -### Arguments [_arguments_285] - -* **Request (object):** - - * **`job_id` (string)**: Identifier for the anomaly detection job. - * **`desc` (Optional, boolean)**: Refer to the description for the `desc` query parameter. - * **`end` (Optional, string | Unit)**: Refer to the description for the `end` query parameter. - * **`exclude_interim` (Optional, boolean)**: Refer to the description for the `exclude_interim` query parameter. - * **`page` (Optional, { from, size })** - * **`record_score` (Optional, number)**: Refer to the description for the `record_score` query parameter. - * **`sort` (Optional, string)**: Refer to the description for the `sort` query parameter. - * **`start` (Optional, string | Unit)**: Refer to the description for the `start` query parameter. - * **`from` (Optional, number)**: Skips the specified number of records. - * **`size` (Optional, number)**: Specifies the maximum number of records to obtain. +#### Request (object) [_request_security.get_service_accounts] +- **`namespace` (Optional, string)**: The name of the namespace. +Omit this parameter to retrieve information about all service accounts. +If you omit this parameter, you must also omit the `service` parameter. +- **`service` (Optional, string)**: The service name. +Omit this parameter to retrieve information about all service accounts that belong to the specified `namespace`. +## client.security.getServiceCredentials [_security.get_service_credentials] +Get service account credentials. +To use this API, you must have at least the `read_security` cluster privilege (or a greater privilege such as `manage_service_account` or `manage_security`). -### get_trained_models [_get_trained_models] +The response includes service account tokens that were created with the create service account tokens API as well as file-backed tokens from all nodes of the cluster. -Get trained model configuration info. +NOTE: For tokens backed by the `service_tokens` file, the API collects them from all nodes of the cluster. +Tokens with the same name from different nodes are assumed to be the same token and are only counted once towards the total number of service tokens. -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-trained-models) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-security-get-service-credentials) ```ts -client.ml.getTrainedModels({ ... }) +client.security.getServiceCredentials({ namespace, service }) ``` +### Arguments [_arguments_security.get_service_credentials] -### Arguments [_arguments_286] - -* **Request (object):** - - * **`model_id` (Optional, string | string[])**: The unique identifier of the trained model or a model alias. 
- - -You can get information for multiple trained models in a single API request by using a list of model IDs or a wildcard expression. *** *`allow_no_match` (Optional, boolean)**: Specifies what to do when the request: - -* Contains wildcard expressions and there are no models that match. -* Contains the _all string or no identifiers and there are no matches. -* Contains wildcard expressions and there are only partial matches. - -If true, it returns an empty array when there are no matches and the subset of results when there are partial matches. ** *`decompress_definition` (Optional, boolean)**: Specifies whether the included model definition should be returned as a JSON map (true) or in a custom compressed format (false). *** *`exclude_generated` (Optional, boolean)**: Indicates if certain fields should be removed from the configuration on retrieval. This allows the configuration to be in an acceptable format to be retrieved and then added to another cluster. *** *`from` (Optional, number)**: Skips the specified number of models. *** *`include` (Optional, Enum("definition" | "feature_importance_baseline" | "hyperparameters" | "total_feature_importance" | "definition_status"))**: A comma delimited string of optional fields to include in the response body. *** *`include_model_definition` (Optional, boolean)**: parameter is deprecated! Use [include=definition] instead *** *`size` (Optional, number)**: Specifies the maximum number of models to obtain. ** *`tags` (Optional, string | string[])**: A comma delimited string of tags. A trained model can have many tags, or none. When supplied, only trained models that contain all the supplied tags are returned. +#### Request (object) [_request_security.get_service_credentials] +- **`namespace` (string)**: The name of the namespace. +- **`service` (string)**: The service name. +## client.security.getSettings [_security.get_settings] +Get security index settings. -### get_trained_models_stats [_get_trained_models_stats] +Get the user-configurable settings for the security internal index (`.security` and associated indices). +Only a subset of the index settings — those that are user-configurable—will be shown. +This includes: -Get trained models usage info. You can get usage information for multiple trained models in a single API request by using a list of model IDs or a wildcard expression. +* `index.auto_expand_replicas` +* `index.number_of_replicas` -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-trained-models-stats) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-security-get-settings) ```ts -client.ml.getTrainedModelsStats({ ... }) +client.security.getSettings({ ... }) ``` +### Arguments [_arguments_security.get_settings] -### Arguments [_arguments_287] - -* **Request (object):** - - * **`model_id` (Optional, string | string[])**: The unique identifier of the trained model or a model alias. It can be a list or a wildcard expression. - * **`allow_no_match` (Optional, boolean)**: Specifies what to do when the request: - - * Contains wildcard expressions and there are no models that match. - * Contains the _all string or no identifiers and there are no matches. - * Contains wildcard expressions and there are only partial matches. +#### Request (object) [_request_security.get_settings] +- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. 
+If no response is received before the timeout expires, the request fails and returns an error. +## client.security.getToken [_security.get_token] +Get a token. -If true, it returns an empty array when there are no matches and the subset of results when there are partial matches. ** *`from` (Optional, number)**: Skips the specified number of models. ** *`size` (Optional, number)**: Specifies the maximum number of models to obtain. +Create a bearer token for access without requiring basic authentication. +The tokens are created by the Elasticsearch Token Service, which is automatically enabled when you configure TLS on the HTTP interface. +Alternatively, you can explicitly enable the `xpack.security.authc.token.enabled` setting. +When you are running in production mode, a bootstrap check prevents you from enabling the token service unless you also enable TLS on the HTTP interface. +The get token API takes the same parameters as a typical OAuth 2.0 token API except for the use of a JSON request body. -### infer_trained_model [_infer_trained_model] +A successful get token API call returns a JSON structure that contains the access token, the amount of time (seconds) that the token expires in, the type, and the scope if available. -Evaluate a trained model. +The tokens returned by the get token API have a finite period of time for which they are valid and after that time period, they can no longer be used. +That time period is defined by the `xpack.security.authc.token.timeout` setting. +If you want to invalidate a token immediately, you can do so by using the invalidate token API. -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-infer-trained-model) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-security-get-token) ```ts -client.ml.inferTrainedModel({ model_id, docs }) +client.security.getToken({ ... }) ``` +### Arguments [_arguments_security.get_token] + +#### Request (object) [_request_security.get_token] +- **`grant_type` (Optional, Enum("password" | "client_credentials" | "_kerberos" | "refresh_token"))**: The type of grant. +Supported grant types are: `password`, `_kerberos`, `client_credentials`, and `refresh_token`. +- **`scope` (Optional, string)**: The scope of the token. +Currently tokens are only issued for a scope of FULL regardless of the value sent with the request. +- **`password` (Optional, string)**: The user's password. +If you specify the `password` grant type, this parameter is required. +This parameter is not valid with any other supported grant type. +- **`kerberos_ticket` (Optional, string)**: The base64 encoded kerberos ticket. +If you specify the `_kerberos` grant type, this parameter is required. +This parameter is not valid with any other supported grant type. +- **`refresh_token` (Optional, string)**: The string that was returned when you created the token, which enables you to extend its life. +If you specify the `refresh_token` grant type, this parameter is required. +This parameter is not valid with any other supported grant type. +- **`username` (Optional, string)**: The username that identifies the user. +If you specify the `password` grant type, this parameter is required. +This parameter is not valid with any other supported grant type. + +## client.security.getUser [_security.get_user] +Get users. -### Arguments [_arguments_288] - -* **Request (object):** - - * **`model_id` (string)**: The unique identifier of the trained model. 
- * **`docs` (Record[])**: An array of objects to pass to the model for inference. The objects should contain a fields matching your configured trained model input. Typically, for NLP models, the field name is `text_field`. Currently, for NLP models, only a single value is allowed. - * **`inference_config` (Optional, { regression, classification, text_classification, zero_shot_classification, fill_mask, ner, pass_through, text_embedding, text_expansion, question_answering })**: The inference configuration updates to apply on the API call - * **`timeout` (Optional, string | -1 | 0)**: Controls the amount of time to wait for inference results. - - - -### info [_info_3] - -Get machine learning information. Get defaults and limits used by machine learning. This endpoint is designed to be used by a user interface that needs to fully understand machine learning configurations where some options are not specified, meaning that the defaults should be used. This endpoint may be used to find out what those defaults are. It also provides information about the maximum size of machine learning jobs that could run in the current cluster configuration. +Get information about users in the native realm and built-in users. -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-info) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-security-get-user) ```ts -client.ml.info() +client.security.getUser({ ... }) ``` +### Arguments [_arguments_security.get_user] + +#### Request (object) [_request_security.get_user] +- **`username` (Optional, string | string[])**: An identifier for the user. You can specify multiple usernames as a list. If you omit this parameter, the API retrieves information about all users. +- **`with_profile_uid` (Optional, boolean)**: Determines whether to retrieve the user profile UID, if it exists, for the users. -### open_job [_open_job] +## client.security.getUserPrivileges [_security.get_user_privileges] +Get user privileges. -Open anomaly detection jobs. An anomaly detection job must be opened to be ready to receive and analyze data. It can be opened and closed multiple times throughout its lifecycle. When you open a new job, it starts with an empty model. When you open an existing job, the most recent model state is automatically loaded. The job is ready to resume its analysis from where it left off, once new data is received. +Get the security privileges for the logged in user. +All users can use this API, but only to determine their own privileges. +To check the privileges of other users, you must use the run as feature. +To check whether a user has a specific list of privileges, use the has privileges API. -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-open-job) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-security-get-user-privileges) ```ts -client.ml.openJob({ job_id }) +client.security.getUserPrivileges({ ... }) ``` +### Arguments [_arguments_security.get_user_privileges] -### Arguments [_arguments_289] - -* **Request (object):** - - * **`job_id` (string)**: Identifier for the anomaly detection job. - * **`timeout` (Optional, string | -1 | 0)**: Refer to the description for the `timeout` query parameter. - +#### Request (object) [_request_security.get_user_privileges] +- **`application` (Optional, string)**: The name of the application. 
Application privileges are always associated with exactly one application. If you do not specify this parameter, the API returns information about all privileges for all applications. +- **`priviledge` (Optional, string)**: The name of the privilege. If you do not specify this parameter, the API returns information about all privileges for the requested application. +- **`username` (Optional, string | null)** +## client.security.getUserProfile [_security.get_user_profile] +Get a user profile. -### post_calendar_events [_post_calendar_events] +Get a user's profile using the unique profile ID. -Add scheduled events to the calendar. +NOTE: The user profile feature is designed only for use by Kibana and Elastic's Observability, Enterprise Search, and Elastic Security solutions. +Individual users and external applications should not call this API directly. +Elastic reserves the right to change or remove this feature in future releases without prior notice. -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-post-calendar-events) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-security-get-user-profile) ```ts -client.ml.postCalendarEvents({ calendar_id, events }) +client.security.getUserProfile({ uid }) ``` +### Arguments [_arguments_security.get_user_profile] -### Arguments [_arguments_290] +#### Request (object) [_request_security.get_user_profile] +- **`uid` (string | string[])**: A unique identifier for the user profile. +- **`data` (Optional, string | string[])**: A list of filters for the `data` field of the profile document. +To return all content use `data=*`. +To return a subset of content use `data=` to retrieve content nested under the specified ``. +By default returns no `data` content. -* **Request (object):** - - * **`calendar_id` (string)**: A string that uniquely identifies a calendar. - * **`events` ({ calendar_id, event_id, description, end_time, start_time, skip_result, skip_model_update, force_time_shift }[])**: A list of one of more scheduled events. The event’s start and end times can be specified as integer milliseconds since the epoch or as a string in ISO 8601 format. +## client.security.grantApiKey [_security.grant_api_key] +Grant an API key. +Create an API key on behalf of another user. +This API is similar to the create API keys API; however, it creates the API key for a user that is different from the user that runs the API. +The caller must have authentication credentials for the user on whose behalf the API key will be created. +It is not possible to use this API to create an API key without that user's credentials. +The supported user authentication credential types are: +* username and password +* Elasticsearch access tokens +* JWTs -### post_data [_post_data] +The user for whom the authentication credentials are provided can optionally "run as" (impersonate) another user. +In this case, the API key will be created on behalf of the impersonated user. -Send data to an anomaly detection job for analysis. +This API is intended to be used by applications that need to create and manage API keys for end users, but cannot guarantee that those users have permission to create API keys on their own behalf. +The API keys are created by the Elasticsearch API key service, which is automatically enabled. -::::{important} -For each job, data can be accepted from only a single connection at a time. It is not currently possible to post data to multiple jobs using wildcards or a list. 
-:::: +A successful grant API key API call returns a JSON structure that contains the API key, its unique id, and its name. +If applicable, it also returns expiration information for the API key in milliseconds. +By default, API keys never expire. You can specify expiration information when you create the API keys. -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-post-data) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-security-grant-api-key) ```ts -client.ml.postData({ job_id }) +client.security.grantApiKey({ api_key, grant_type }) ``` +### Arguments [_arguments_security.grant_api_key] -### Arguments [_arguments_291] - -* **Request (object):** - - * **`job_id` (string)**: Identifier for the anomaly detection job. The job must have a state of open to receive and process the data. - * **`data` (Optional, TData[])** - * **`reset_end` (Optional, string | Unit)**: Specifies the end of the bucket resetting range. - * **`reset_start` (Optional, string | Unit)**: Specifies the start of the bucket resetting range. - +#### Request (object) [_request_security.grant_api_key] +- **`api_key` ({ name, expiration, role_descriptors, metadata })**: The API key. +- **`grant_type` (Enum("access_token" | "password"))**: The type of grant. Supported grant types are: `access_token`, `password`. +- **`access_token` (Optional, string)**: The user's access token. +If you specify the `access_token` grant type, this parameter is required. +It is not valid with other grant types. +- **`username` (Optional, string)**: The user name that identifies the user. +If you specify the `password` grant type, this parameter is required. +It is not valid with other grant types. +- **`password` (Optional, string)**: The user's password. +If you specify the `password` grant type, this parameter is required. +It is not valid with other grant types. +- **`run_as` (Optional, string)**: The name of the user to be impersonated. +## client.security.hasPrivileges [_security.has_privileges] +Check user privileges. -### preview_data_frame_analytics [_preview_data_frame_analytics] - -Preview features used by data frame analytics. Previews the extracted features used by a data frame analytics config. +Determine whether the specified user has a specified list of privileges. +All users can use this API, but only to determine their own privileges. +To check the privileges of other users, you must use the run as feature. -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-preview-data-frame-analytics) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-security-has-privileges) ```ts -client.ml.previewDataFrameAnalytics({ ... }) +client.security.hasPrivileges({ ... }) ``` +### Arguments [_arguments_security.has_privileges] -### Arguments [_arguments_292] - -* **Request (object):** - - * **`id` (Optional, string)**: Identifier for the data frame analytics job. - * **`config` (Optional, { source, analysis, model_memory_limit, max_num_threads, analyzed_fields })**: A data frame analytics config as described in create data frame analytics jobs. Note that `id` and `dest` don’t need to be provided in the context of this API. 
- +#### Request (object) [_request_security.has_privileges] +- **`user` (Optional, string)**: Username +- **`application` (Optional, { application, privileges, resources }[])** +- **`cluster` (Optional, Enum("all" | "cancel_task" | "create_snapshot" | "cross_cluster_replication" | "cross_cluster_search" | "delegate_pki" | "grant_api_key" | "manage" | "manage_api_key" | "manage_autoscaling" | "manage_behavioral_analytics" | "manage_ccr" | "manage_data_frame_transforms" | "manage_data_stream_global_retention" | "manage_enrich" | "manage_ilm" | "manage_index_templates" | "manage_inference" | "manage_ingest_pipelines" | "manage_logstash_pipelines" | "manage_ml" | "manage_oidc" | "manage_own_api_key" | "manage_pipeline" | "manage_rollup" | "manage_saml" | "manage_search_application" | "manage_search_query_rules" | "manage_search_synonyms" | "manage_security" | "manage_service_account" | "manage_slm" | "manage_token" | "manage_transform" | "manage_user_profile" | "manage_watcher" | "monitor" | "monitor_data_frame_transforms" | "monitor_data_stream_global_retention" | "monitor_enrich" | "monitor_inference" | "monitor_ml" | "monitor_rollup" | "monitor_snapshot" | "monitor_stats" | "monitor_text_structure" | "monitor_transform" | "monitor_watcher" | "none" | "post_behavioral_analytics_event" | "read_ccr" | "read_fleet_secrets" | "read_ilm" | "read_pipeline" | "read_security" | "read_slm" | "transport_client" | "write_connector_secrets" | "write_fleet_secrets")[])**: A list of the cluster privileges that you want to check. +- **`index` (Optional, { names, privileges, allow_restricted_indices }[])** +## client.security.hasPrivilegesUserProfile [_security.has_privileges_user_profile] +Check user profile privileges. -### preview_datafeed [_preview_datafeed] +Determine whether the users associated with the specified user profile IDs have all the requested privileges. -Preview a datafeed. This API returns the first "page" of search results from a datafeed. You can preview an existing datafeed or provide configuration details for a datafeed and anomaly detection job in the API. The preview shows the structure of the data that will be passed to the anomaly detection engine. IMPORTANT: When Elasticsearch security features are enabled, the preview uses the credentials of the user that called the API. However, when the datafeed starts it uses the roles of the last user that created or updated the datafeed. To get a preview that accurately reflects the behavior of the datafeed, use the appropriate credentials. You can also use secondary authorization headers to supply the credentials. +NOTE: The user profile feature is designed only for use by Kibana and Elastic's Observability, Enterprise Search, and Elastic Security solutions. Individual users and external applications should not call this API directly. +Elastic reserves the right to change or remove this feature in future releases without prior notice. -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-preview-datafeed) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-security-has-privileges-user-profile) ```ts -client.ml.previewDatafeed({ ... }) +client.security.hasPrivilegesUserProfile({ uids, privileges }) ``` +### Arguments [_arguments_security.has_privileges_user_profile] -### Arguments [_arguments_293] - -* **Request (object):** - - * **`datafeed_id` (Optional, string)**: A numerical character string that uniquely identifies the datafeed. 
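A minimal sketch of the two most common invalidation patterns, assuming an already-configured `client` and the hypothetical key name `my-ingest-key`:

```ts
// Invalidate every REST API key owned by the calling user.
await client.security.invalidateApiKey({ owner: true })

// Invalidate a specific key by name ('my-ingest-key' is a placeholder).
await client.security.invalidateApiKey({ name: 'my-ingest-key' })
```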
This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. It must start and end with alphanumeric characters. NOTE: If you use this path parameter, you cannot provide datafeed or anomaly detection job configuration details in the request body. - * **`datafeed_config` (Optional, { aggregations, chunking_config, datafeed_id, delayed_data_check_config, frequency, indices, indices_options, job_id, max_empty_searches, query, query_delay, runtime_mappings, script_fields, scroll_size })**: The datafeed definition to preview. - * **`job_config` (Optional, { allow_lazy_open, analysis_config, analysis_limits, background_persist_interval, custom_settings, daily_model_snapshot_retention_after_days, data_description, datafeed_config, description, groups, job_id, job_type, model_plot_config, model_snapshot_retention_days, renormalization_window_days, results_index_name, results_retention_days })**: The configuration details for the anomaly detection job that is associated with the datafeed. If the `datafeed_config` object does not include a `job_id` that references an existing anomaly detection job, you must supply this `job_config` object. If you include both a `job_id` and a `job_config`, the latter information is used. You cannot specify a `job_config` object unless you also supply a `datafeed_config` object. - * **`start` (Optional, string | Unit)**: The start time from where the datafeed preview should begin - * **`end` (Optional, string | Unit)**: The end time when the datafeed preview should stop +#### Request (object) [_request_security.has_privileges_user_profile] +- **`uids` (string[])**: A list of profile IDs. The privileges are checked for associated users of the profiles. +- **`privileges` ({ application, cluster, index })**: An object containing all the privileges to be checked. +## client.security.invalidateApiKey [_security.invalidate_api_key] +Invalidate API keys. +This API invalidates API keys created by the create API key or grant API key APIs. +Invalidated API keys fail authentication, but they can still be viewed using the get API key information and query API key information APIs, for at least the configured retention period, until they are automatically deleted. -### put_calendar [_put_calendar] +To use this API, you must have at least the `manage_security`, `manage_api_key`, or `manage_own_api_key` cluster privileges. +The `manage_security` privilege allows deleting any API key, including both REST and cross cluster API keys. +The `manage_api_key` privilege allows deleting any REST API key, but not cross cluster API keys. +The `manage_own_api_key` only allows deleting REST API keys that are owned by the user. +In addition, with the `manage_own_api_key` privilege, an invalidation request must be issued in one of the three formats: -Create a calendar. +- Set the parameter `owner=true`. +- Or, set both `username` and `realm_name` to match the user's identity. +- Or, if the request is issued by an API key, that is to say an API key invalidates itself, specify its ID in the `ids` field. -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-put-calendar) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-security-invalidate-api-key) ```ts -client.ml.putCalendar({ calendar_id }) +client.security.invalidateApiKey({ ... 
}) ``` +### Arguments [_arguments_security.invalidate_api_key] -### Arguments [_arguments_294] - -* **Request (object):** +#### Request (object) [_request_security.invalidate_api_key] +- **`id` (Optional, string)** +- **`ids` (Optional, string[])**: A list of API key ids. +This parameter cannot be used with any of `name`, `realm_name`, or `username`. +- **`name` (Optional, string)**: An API key name. +This parameter cannot be used with any of `ids`, `realm_name` or `username`. +- **`owner` (Optional, boolean)**: Query API keys owned by the currently authenticated user. +The `realm_name` or `username` parameters cannot be specified when this parameter is set to `true` as they are assumed to be the currently authenticated ones. - * **`calendar_id` (string)**: A string that uniquely identifies a calendar. - * **`job_ids` (Optional, string[])**: An array of anomaly detection job identifiers. - * **`description` (Optional, string)**: A description of the calendar. +NOTE: At least one of `ids`, `name`, `username`, and `realm_name` must be specified if `owner` is `false`. +- **`realm_name` (Optional, string)**: The name of an authentication realm. +This parameter cannot be used with either `ids` or `name`, or when `owner` flag is set to `true`. +- **`username` (Optional, string)**: The username of a user. +This parameter cannot be used with either `ids` or `name` or when `owner` flag is set to `true`. +## client.security.invalidateToken [_security.invalidate_token] +Invalidate a token. +The access tokens returned by the get token API have a finite period of time for which they are valid. +After that time period, they can no longer be used. +The time period is defined by the `xpack.security.authc.token.timeout` setting. -### put_calendar_job [_put_calendar_job] +The refresh tokens returned by the get token API are only valid for 24 hours. +They can also be used exactly once. +If you want to invalidate one or more access or refresh tokens immediately, use this invalidate token API. -Add anomaly detection job to calendar. +NOTE: While all parameters are optional, at least one of them is required. +More specifically, either one of `token` or `refresh_token` parameters is required. +If none of these two are specified, then `realm_name` and/or `username` need to be specified. -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-put-calendar-job) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-security-invalidate-token) ```ts -client.ml.putCalendarJob({ calendar_id, job_id }) +client.security.invalidateToken({ ... }) ``` +### Arguments [_arguments_security.invalidate_token] -### Arguments [_arguments_295] - -* **Request (object):** - - * **`calendar_id` (string)**: A string that uniquely identifies a calendar. - * **`job_id` (string | string[])**: An identifier for the anomaly detection jobs. It can be a job identifier, a group name, or a list of jobs or groups. +#### Request (object) [_request_security.invalidate_token] +- **`token` (Optional, string)**: An access token. +This parameter cannot be used if any of `refresh_token`, `realm_name`, or `username` are used. +- **`refresh_token` (Optional, string)**: A refresh token. +This parameter cannot be used if any of `refresh_token`, `realm_name`, or `username` are used. +- **`realm_name` (Optional, string)**: The name of an authentication realm. +This parameter cannot be used with either `refresh_token` or `token`. 
+- **`username` (Optional, string)**: The username of a user. +This parameter cannot be used with either `refresh_token` or `token`. +## client.security.oidcAuthenticate [_security.oidc_authenticate] +Authenticate OpenID Connect. +Exchange an OpenID Connect authentication response message for an Elasticsearch internal access token and refresh token that can be subsequently used for authentication. -### put_data_frame_analytics [_put_data_frame_analytics] - -Create a data frame analytics job. This API creates a data frame analytics job that performs an analysis on the source indices and stores the outcome in a destination index. By default, the query used in the source configuration is `{"match_all": {}}`. - -If the destination index does not exist, it is created automatically when you start the job. - -If you supply only a subset of the regression or classification parameters, hyperparameter optimization occurs. It determines a value for each of the undefined parameters. +Elasticsearch exposes all the necessary OpenID Connect related functionality with the OpenID Connect APIs. +These APIs are used internally by Kibana in order to provide OpenID Connect based authentication, but can also be used by other, custom web applications or other clients. -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-put-data-frame-analytics) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-security-oidc-authenticate) ```ts -client.ml.putDataFrameAnalytics({ id, analysis, dest, source }) +client.security.oidcAuthenticate({ nonce, redirect_uri, state }) ``` +### Arguments [_arguments_security.oidc_authenticate] -### Arguments [_arguments_296] - -* **Request (object):** - - * **`id` (string)**: Identifier for the data frame analytics job. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. It must start and end with alphanumeric characters. - * **`analysis` ({ classification, outlier_detection, regression })**: The analysis configuration, which contains the information necessary to perform one of the following types of analysis: classification, outlier detection, or regression. - * **`dest` ({ index, results_field })**: The destination configuration. - * **`source` ({ index, query, runtime_mappings, _source })**: The configuration of how to source the analysis data. - * **`allow_lazy_start` (Optional, boolean)**: Specifies whether this job can start when there is insufficient machine learning node capacity for it to be immediately assigned to a node. If set to `false` and a machine learning node with capacity to run the job cannot be immediately found, the API returns an error. If set to `true`, the API does not return an error; the job waits in the `starting` state until sufficient machine learning node capacity is available. This behavior is also affected by the cluster-wide `xpack.ml.max_lazy_ml_nodes` setting. - * **`analyzed_fields` (Optional, { includes, excludes })**: Specifies `includes` and/or `excludes` patterns to select which fields will be included in the analysis. The patterns specified in `excludes` are applied last, therefore `excludes` takes precedence. In other words, if the same field is specified in both `includes` and `excludes`, then the field will not be included in the analysis. If `analyzed_fields` is not set, only the relevant fields will be included. For example, all the numeric fields for outlier detection. 
The supported fields vary for each type of analysis. Outlier detection requires numeric or `boolean` data to analyze. The algorithms don’t support missing values therefore fields that have data types other than numeric or boolean are ignored. Documents where included fields contain missing values, null values, or an array are also ignored. Therefore the `dest` index may contain documents that don’t have an outlier score. Regression supports fields that are numeric, `boolean`, `text`, `keyword`, and `ip` data types. It is also tolerant of missing values. Fields that are supported are included in the analysis, other fields are ignored. Documents where included fields contain an array with two or more values are also ignored. Documents in the `dest` index that don’t contain a results field are not included in the regression analysis. Classification supports fields that are numeric, `boolean`, `text`, `keyword`, and `ip` data types. It is also tolerant of missing values. Fields that are supported are included in the analysis, other fields are ignored. Documents where included fields contain an array with two or more values are also ignored. Documents in the `dest` index that don’t contain a results field are not included in the classification analysis. Classification analysis can be improved by mapping ordinal variable values to a single number. For example, in case of age ranges, you can model the values as `0-14 = 0`, `15-24 = 1`, `25-34 = 2`, and so on. - * **`description` (Optional, string)**: A description of the job. - * **`max_num_threads` (Optional, number)**: The maximum number of threads to be used by the analysis. Using more threads may decrease the time necessary to complete the analysis at the cost of using more CPU. Note that the process may use additional threads for operational functionality other than the analysis itself. - * **`_meta` (Optional, Record)** - * **`model_memory_limit` (Optional, string)**: The approximate maximum amount of memory resources that are permitted for analytical processing. If your `elasticsearch.yml` file contains an `xpack.ml.max_model_memory_limit` setting, an error occurs when you try to create data frame analytics jobs that have `model_memory_limit` values greater than that setting. - * **`headers` (Optional, Record)** - * **`version` (Optional, string)** - +#### Request (object) [_request_security.oidc_authenticate] +- **`nonce` (string)**: Associate a client session with an ID token and mitigate replay attacks. +This value needs to be the same as the one that was provided to the `/_security/oidc/prepare` API or the one that was generated by Elasticsearch and included in the response to that call. +- **`redirect_uri` (string)**: The URL to which the OpenID Connect Provider redirected the User Agent in response to an authentication request after a successful authentication. +This URL must be provided as-is (URL encoded), taken from the body of the response or as the value of a location header in the response from the OpenID Connect Provider. +- **`state` (string)**: Maintain state between the authentication request and the response. +This value needs to be the same as the one that was provided to the `/_security/oidc/prepare` API or the one that was generated by Elasticsearch and included in the response to that call. +- **`realm` (Optional, string)**: The name of the OpenID Connect realm. +This property is useful in cases where multiple realms are defined. +## client.security.oidcLogout [_security.oidc_logout] +Logout of OpenID Connect. 
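A short sketch of the `client.security.hasPrivileges` call documented above, checking the calling user's own cluster and index privileges. The index pattern and privilege names are illustrative assumptions.

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'https://localhost:9200/' }) // placeholder connection

// Check the calling user's own privileges; any user may do this for themselves.
const privileges = await client.security.hasPrivileges({
  cluster: ['monitor', 'manage_own_api_key'],
  index: [
    { names: ['my-index-*'], privileges: ['read', 'write'] }
  ]
})

// `has_all_requested` is true only when every requested privilege is held.
console.log(privileges.has_all_requested)
console.log(privileges.index)
```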
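A companion sketch for `client.security.invalidateApiKey`, also documented above: because `owner` is set to `true`, only the caller's own keys with the given (hypothetical) name are invalidated, and `realm_name`/`username` must be omitted.

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'https://localhost:9200/' }) // placeholder connection

// Invalidate the caller's own API keys that carry this (hypothetical) name.
const result = await client.security.invalidateApiKey({
  name: 'janes-key',
  owner: true
})

console.log(result.invalidated_api_keys) // ids of the keys that were invalidated
console.log(result.error_count)          // number of keys that could not be invalidated
```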
-### put_datafeed [_put_datafeed] +Invalidate an access token and a refresh token that were generated as a response to the `/_security/oidc/authenticate` API. -Create a datafeed. Datafeeds retrieve data from Elasticsearch for analysis by an anomaly detection job. You can associate only one datafeed with each anomaly detection job. The datafeed contains a query that runs at a defined interval (`frequency`). If you are concerned about delayed data, you can add a delay (`query_delay') at each interval. By default, the datafeed uses the following query: `{"match_all": {"boost": 1}}`. +If the OpenID Connect authentication realm in Elasticsearch is accordingly configured, the response to this call will contain a URI pointing to the end session endpoint of the OpenID Connect Provider in order to perform single logout. -When Elasticsearch security features are enabled, your datafeed remembers which roles the user who created it had at the time of creation and runs the query using those same roles. If you provide secondary authorization headers, those credentials are used instead. You must use Kibana, this API, or the create anomaly detection jobs API to create a datafeed. Do not add a datafeed directly to the `.ml-config` index. Do not give users `write` privileges on the `.ml-config` index. +Elasticsearch exposes all the necessary OpenID Connect related functionality with the OpenID Connect APIs. +These APIs are used internally by Kibana in order to provide OpenID Connect based authentication, but can also be used by other, custom web applications or other clients. -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-put-datafeed) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-security-oidc-logout) ```ts -client.ml.putDatafeed({ datafeed_id }) +client.security.oidcLogout({ token }) ``` +### Arguments [_arguments_security.oidc_logout] -### Arguments [_arguments_297] - -* **Request (object):** - - * **`datafeed_id` (string)**: A numerical character string that uniquely identifies the datafeed. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. It must start and end with alphanumeric characters. - * **`aggregations` (Optional, Record)**: If set, the datafeed performs aggregation searches. Support for aggregations is limited and should be used only with low cardinality data. - * **`chunking_config` (Optional, { mode, time_span })**: Datafeeds might be required to search over long time periods, for several months or years. This search is split into time chunks in order to ensure the load on Elasticsearch is managed. Chunking configuration controls how the size of these time chunks are calculated; it is an advanced configuration option. - * **`delayed_data_check_config` (Optional, { check_window, enabled })**: Specifies whether the datafeed checks for missing data and the size of the window. The datafeed can optionally search over indices that have already been read in an effort to determine whether any data has subsequently been added to the index. If missing data is found, it is a good indication that the `query_delay` is set too low and the data is being indexed after the datafeed has passed that moment in time. This check runs only on real-time datafeeds. - * **`frequency` (Optional, string | -1 | 0)**: The interval at which scheduled queries are made while the datafeed runs in real time. 
The default value is either the bucket span for short bucket spans, or, for longer bucket spans, a sensible fraction of the bucket span. When `frequency` is shorter than the bucket span, interim results for the last (partial) bucket are written then eventually overwritten by the full bucket results. If the datafeed uses aggregations, this value must be divisible by the interval of the date histogram aggregation. - * **`indices` (Optional, string | string[])**: An array of index names. Wildcards are supported. If any of the indices are in remote clusters, the machine learning nodes must have the `remote_cluster_client` role. - * **`indices_options` (Optional, { allow_no_indices, expand_wildcards, ignore_unavailable, ignore_throttled })**: Specifies index expansion options that are used during search - * **`job_id` (Optional, string)**: Identifier for the anomaly detection job. - * **`max_empty_searches` (Optional, number)**: If a real-time datafeed has never seen any data (including during any initial training period), it automatically stops and closes the associated job after this many real-time searches return no documents. In other words, it stops after `frequency` times `max_empty_searches` of real-time operation. If not set, a datafeed with no end time that sees no data remains started until it is explicitly stopped. By default, it is not set. - * **`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })**: The Elasticsearch query domain-specific language (DSL). This value corresponds to the query object in an Elasticsearch search POST body. All the options that are supported by Elasticsearch can be used, as this object is passed verbatim to Elasticsearch. - * **`query_delay` (Optional, string | -1 | 0)**: The number of seconds behind real time that data is queried. For example, if data from 10:04 a.m. might not be searchable in Elasticsearch until 10:06 a.m., set this property to 120 seconds. The default value is randomly selected between `60s` and `120s`. This randomness improves the query performance when there are multiple jobs running on the same node. - * **`runtime_mappings` (Optional, Record)**: Specifies runtime fields for the datafeed search. - * **`script_fields` (Optional, Record)**: Specifies scripts that evaluate custom expressions and returns script fields to the datafeed. The detector configuration objects in a job can contain functions that use these script fields. - * **`scroll_size` (Optional, number)**: The size parameter that is used in Elasticsearch searches when the datafeed does not use aggregations. The maximum value is the value of `index.max_result_window`, which is 10,000 by default. - * **`headers` (Optional, Record)** - * **`allow_no_indices` (Optional, boolean)**: If true, wildcard indices expressions that resolve into no concrete indices are ignored. 
This includes the `_all` string or when no indices are specified. - * **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports a list of values. - * **`ignore_throttled` (Optional, boolean)**: If true, concrete, expanded, or aliased indices are ignored when frozen. - * **`ignore_unavailable` (Optional, boolean)**: If true, unavailable indices (missing or closed) are ignored. +#### Request (object) [_request_security.oidc_logout] +- **`token` (string)**: The access token to be invalidated. +- **`refresh_token` (Optional, string)**: The refresh token to be invalidated. +## client.security.oidcPrepareAuthentication [_security.oidc_prepare_authentication] +Prepare OpenID connect authentication. +Create an oAuth 2.0 authentication request as a URL string based on the configuration of the OpenID Connect authentication realm in Elasticsearch. -### put_filter [_put_filter] +The response of this API is a URL pointing to the Authorization Endpoint of the configured OpenID Connect Provider, which can be used to redirect the browser of the user in order to continue the authentication process. -Create a filter. A filter contains a list of strings. It can be used by one or more anomaly detection jobs. Specifically, filters are referenced in the `custom_rules` property of detector configuration objects. +Elasticsearch exposes all the necessary OpenID Connect related functionality with the OpenID Connect APIs. +These APIs are used internally by Kibana in order to provide OpenID Connect based authentication, but can also be used by other, custom web applications or other clients. -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-put-filter) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-security-oidc-prepare-authentication) ```ts -client.ml.putFilter({ filter_id }) +client.security.oidcPrepareAuthentication({ ... }) ``` +### Arguments [_arguments_security.oidc_prepare_authentication] + +#### Request (object) [_request_security.oidc_prepare_authentication] +- **`iss` (Optional, string)**: In the case of a third party initiated single sign on, this is the issuer identifier for the OP that the RP is to send the authentication request to. +It cannot be specified when *realm* is specified. +One of *realm* or *iss* is required. +- **`login_hint` (Optional, string)**: In the case of a third party initiated single sign on, it is a string value that is included in the authentication request as the *login_hint* parameter. +This parameter is not valid when *realm* is specified. +- **`nonce` (Optional, string)**: The value used to associate a client session with an ID token and to mitigate replay attacks. +If the caller of the API does not provide a value, Elasticsearch will generate one with sufficient entropy and return it in the response. +- **`realm` (Optional, string)**: The name of the OpenID Connect realm in Elasticsearch the configuration of which should be used in order to generate the authentication request. +It cannot be specified when *iss* is specified. +One of *realm* or *iss* is required. 
+- **`state` (Optional, string)**: The value used to maintain state between the authentication request and the response, typically used as a Cross-Site Request Forgery mitigation. +If the caller of the API does not provide a value, Elasticsearch will generate one with sufficient entropy and return it in the response. -### Arguments [_arguments_298] +## client.security.putPrivileges [_security.put_privileges] +Create or update application privileges. -* **Request (object):** +To use this API, you must have one of the following privileges: - * **`filter_id` (string)**: A string that uniquely identifies a filter. - * **`description` (Optional, string)**: A description of the filter. - * **`items` (Optional, string[])**: The items of the filter. A wildcard `*` can be used at the beginning or the end of an item. Up to 10000 items are allowed in each filter. +* The `manage_security` cluster privilege (or a greater privilege such as `all`). +* The "Manage Application Privileges" global privilege for the application being referenced in the request. +Application names are formed from a prefix, with an optional suffix that conform to the following rules: +* The prefix must begin with a lowercase ASCII letter. +* The prefix must contain only ASCII letters or digits. +* The prefix must be at least 3 characters long. +* If the suffix exists, it must begin with either a dash `-` or `_`. +* The suffix cannot contain any of the following characters: `\`, `/`, `*`, `?`, `"`, `<`, `>`, `|`, `,`, `*`. +* No part of the name can contain whitespace. -### put_job [_put_job] +Privilege names must begin with a lowercase ASCII letter and must contain only ASCII letters and digits along with the characters `_`, `-`, and `.`. -Create an anomaly detection job. If you include a `datafeed_config`, you must have read index privileges on the source index. If you include a `datafeed_config` but do not provide a query, the datafeed uses `{"match_all": {"boost": 1}}`. +Action names can contain any number of printable ASCII characters and must contain at least one of the following characters: `/`, `*`, `:`. -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-put-job) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-security-put-privileges) ```ts -client.ml.putJob({ job_id, analysis_config, data_description }) +client.security.putPrivileges({ ... }) ``` +### Arguments [_arguments_security.put_privileges] -### Arguments [_arguments_299] +#### Request (object) [_request_security.put_privileges] +- **`privileges` (Optional, Record>)** +- **`refresh` (Optional, Enum(true | false | "wait_for"))**: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. -* **Request (object):** +## client.security.putRole [_security.put_role] +Create or update roles. - * **`job_id` (string)**: The identifier for the anomaly detection job. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. It must start and end with alphanumeric characters. - * **`analysis_config` ({ bucket_span, categorization_analyzer, categorization_field_name, categorization_filters, detectors, influencers, latency, model_prune_window, multivariate_by_fields, per_partition_categorization, summary_count_field_name })**: Specifies how to analyze the data. 
After you create a job, you cannot change the analysis configuration; all the properties are informational. - * **`data_description` ({ format, time_field, time_format, field_delimiter })**: Defines the format of the input data when you send data to the job by using the post data API. Note that when configure a datafeed, these properties are automatically set. When data is received via the post data API, it is not stored in Elasticsearch. Only the results for anomaly detection are retained. - * **`allow_lazy_open` (Optional, boolean)**: Advanced configuration option. Specifies whether this job can open when there is insufficient machine learning node capacity for it to be immediately assigned to a node. By default, if a machine learning node with capacity to run the job cannot immediately be found, the open anomaly detection jobs API returns an error. However, this is also subject to the cluster-wide `xpack.ml.max_lazy_ml_nodes` setting. If this option is set to true, the open anomaly detection jobs API does not return an error and the job waits in the opening state until sufficient machine learning node capacity is available. - * **`analysis_limits` (Optional, { categorization_examples_limit, model_memory_limit })**: Limits can be applied for the resources required to hold the mathematical models in memory. These limits are approximate and can be set per job. They do not control the memory used by other processes, for example the Elasticsearch Java processes. - * **`background_persist_interval` (Optional, string | -1 | 0)**: Advanced configuration option. The time between each periodic persistence of the model. The default value is a randomized value between 3 to 4 hours, which avoids all jobs persisting at exactly the same time. The smallest allowed value is 1 hour. For very large models (several GB), persistence could take 10-20 minutes, so do not set the `background_persist_interval` value too low. - * **`custom_settings` (Optional, User-defined value)**: Advanced configuration option. Contains custom meta data about the job. - * **`daily_model_snapshot_retention_after_days` (Optional, number)**: Advanced configuration option, which affects the automatic removal of old model snapshots for this job. It specifies a period of time (in days) after which only the first snapshot per day is retained. This period is relative to the timestamp of the most recent snapshot for this job. Valid values range from 0 to `model_snapshot_retention_days`. - * **`datafeed_config` (Optional, { aggregations, chunking_config, datafeed_id, delayed_data_check_config, frequency, indices, indices_options, job_id, max_empty_searches, query, query_delay, runtime_mappings, script_fields, scroll_size })**: Defines a datafeed for the anomaly detection job. If Elasticsearch security features are enabled, your datafeed remembers which roles the user who created it had at the time of creation and runs the query using those same roles. If you provide secondary authorization headers, those credentials are used instead. - * **`description` (Optional, string)**: A description of the job. - * **`groups` (Optional, string[])**: A list of job groups. A job can belong to no groups or many. - * **`model_plot_config` (Optional, { annotations_enabled, enabled, terms })**: This advanced configuration option stores model information along with the results. It provides a more detailed view into anomaly detection. 
If you enable model plot it can add considerable overhead to the performance of the system; it is not feasible for jobs with many entities. Model plot provides a simplified and indicative view of the model and its bounds. It does not display complex features such as multivariate correlations or multimodal data. As such, anomalies may occasionally be reported which cannot be seen in the model plot. Model plot config can be configured when the job is created or updated later. It must be disabled if performance issues are experienced. - * **`model_snapshot_retention_days` (Optional, number)**: Advanced configuration option, which affects the automatic removal of old model snapshots for this job. It specifies the maximum period of time (in days) that snapshots are retained. This period is relative to the timestamp of the most recent snapshot for this job. By default, snapshots ten days older than the newest snapshot are deleted. - * **`renormalization_window_days` (Optional, number)**: Advanced configuration option. The period over which adjustments to the score are applied, as new data is seen. The default value is the longer of 30 days or 100 bucket spans. - * **`results_index_name` (Optional, string)**: A text string that affects the name of the machine learning results index. By default, the job generates an index named `.ml-anomalies-shared`. - * **`results_retention_days` (Optional, number)**: Advanced configuration option. The period of time (in days) that results are retained. Age is calculated relative to the timestamp of the latest bucket result. If this property has a non-null value, once per day at 00:30 (server time), results that are the specified number of days older than the latest bucket result are deleted from Elasticsearch. The default value is null, which means all results are retained. Annotations generated by the system also count as results for retention purposes; they are deleted after the same number of days as results. Annotations added by users are retained forever. - * **`allow_no_indices` (Optional, boolean)**: If `true`, wildcard indices expressions that resolve into no concrete indices are ignored. This includes the `_all` string or when no indices are specified. - * **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports a list of values. Valid values are: +The role management APIs are generally the preferred way to manage roles in the native realm, rather than using file-based role management. +The create or update roles API cannot update roles that are defined in roles files. +File-based role management is not available in Elastic Serverless. -* `all`: Match any data stream or index, including hidden ones. -* `closed`: Match closed, non-hidden indices. Also matches any non-hidden data stream. Data streams cannot be closed. -* `hidden`: Match hidden data streams and hidden indices. Must be combined with `open`, `closed`, or both. -* `none`: Wildcard patterns are not accepted. -* `open`: Match open, non-hidden indices. Also matches any non-hidden data stream. - - * **`ignore_throttled` (Optional, boolean)**: If `true`, concrete, expanded or aliased indices are ignored when frozen. - * **`ignore_unavailable` (Optional, boolean)**: If `true`, unavailable indices (missing or closed) are ignored. 
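The OpenID Connect helpers documented above (`oidcPrepareAuthentication`, `oidcAuthenticate`, and `oidcLogout`) are normally used together. The sketch below strings them into a single login and logout flow; the realm name and callback URL are assumptions for illustration, and in a real application the browser redirect happens between steps 1 and 2.

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'https://localhost:9200/' }) // placeholder connection

// 1. Build the authorization URL for a hypothetical realm named "oidc1".
//    The response carries the redirect URL plus the generated state and nonce.
const prepared = await client.security.oidcPrepareAuthentication({ realm: 'oidc1' })
// Send the user's browser to `prepared.redirect`, keeping `prepared.state`
// and `prepared.nonce` in the user's session.

// 2. After the OpenID Connect Provider redirects back, exchange the callback
//    URL (a placeholder below) for Elasticsearch access and refresh tokens.
const tokens = await client.security.oidcAuthenticate({
  redirect_uri: 'https://my-app.example.com/oidc/callback?code=abc&state=xyz',
  state: prepared.state,
  nonce: prepared.nonce,
  realm: 'oidc1'
})

// 3. Later, invalidate both tokens to log the user out.
await client.security.oidcLogout({
  token: tokens.access_token,
  refresh_token: tokens.refresh_token
})
```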
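For `client.security.putPrivileges`, documented a little earlier, a minimal sketch that registers two privileges for a hypothetical application named `myapp`; the application, privilege, and action names follow the naming rules listed in that section.

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'https://localhost:9200/' }) // placeholder connection

// Register two application privileges for the hypothetical app "myapp".
// Action names must contain at least one of `/`, `*`, or `:`.
await client.security.putPrivileges({
  refresh: 'wait_for',
  privileges: {
    myapp: {
      read: { actions: ['data:read/*', 'action:login'] },
      write: { actions: ['data:write/*'] }
    }
  }
})
```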
- - - -### put_trained_model [_put_trained_model] - -Create a trained model. Enable you to supply a trained model that is not created by data frame analytics. - -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-put-trained-model) - -```ts -client.ml.putTrainedModel({ model_id }) -``` - - -### Arguments [_arguments_300] - -* **Request (object):** - - * **`model_id` (string)**: The unique identifier of the trained model. - * **`compressed_definition` (Optional, string)**: The compressed (GZipped and Base64 encoded) inference definition of the model. If compressed_definition is specified, then definition cannot be specified. - * **`definition` (Optional, { preprocessors, trained_model })**: The inference definition for the model. If definition is specified, then compressed_definition cannot be specified. - * **`description` (Optional, string)**: A human-readable description of the inference trained model. - * **`inference_config` (Optional, { regression, classification, text_classification, zero_shot_classification, fill_mask, ner, pass_through, text_embedding, text_expansion, question_answering })**: The default configuration for inference. This can be either a regression or classification configuration. It must match the underlying definition.trained_model’s target_type. For pre-packaged models such as ELSER the config is not required. - * **`input` (Optional, { field_names })**: The input field names for the model definition. - * **`metadata` (Optional, User-defined value)**: An object map that contains metadata about the model. - * **`model_type` (Optional, Enum("tree_ensemble" | "lang_ident" | "pytorch"))**: The model type. - * **`model_size_bytes` (Optional, number)**: The estimated memory usage in bytes to keep the trained model in memory. This property is supported only if defer_definition_decompression is true or the model definition is not supplied. - * **`platform_architecture` (Optional, string)**: The platform architecture (if applicable) of the trained mode. If the model only works on one platform, because it is heavily optimized for a particular processor architecture and OS combination, then this field specifies which. The format of the string must match the platform identifiers used by Elasticsearch, so one of, `linux-x86_64`, `linux-aarch64`, `darwin-x86_64`, `darwin-aarch64`, or `windows-x86_64`. For portable models (those that work independent of processor architecture or OS features), leave this field unset. - * **`tags` (Optional, string[])**: An array of tags to organize the model. - * **`prefix_strings` (Optional, { ingest, search })**: Optional prefix strings applied at inference - * **`defer_definition_decompression` (Optional, boolean)**: If set to `true` and a `compressed_definition` is provided, the request defers definition decompression and skips relevant validations. - * **`wait_for_completion` (Optional, boolean)**: Whether to wait for all child operations (e.g. model download) to complete. - - - -### put_trained_model_alias [_put_trained_model_alias] - -Create or update a trained model alias. A trained model alias is a logical name used to reference a single trained model. You can use aliases instead of trained model identifiers to make it easier to reference your models. For example, you can use aliases in inference aggregations and processors. An alias must be unique and refer to only a single trained model. However, you can have multiple aliases for each trained model. 
If you use this API to update an alias such that it references a different trained model ID and the model uses a different type of data frame analytics, an error occurs. For example, this situation occurs if you have a trained model for regression analysis and a trained model for classification analysis; you cannot reassign an alias from one type of trained model to another. If you use this API to update an alias and there are very few input fields in common between the old and new trained models for the model alias, the API returns a warning. - -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-put-trained-model-alias) - -```ts -client.ml.putTrainedModelAlias({ model_alias, model_id }) -``` - - -### Arguments [_arguments_301] - -* **Request (object):** - - * **`model_alias` (string)**: The alias to create or update. This value cannot end in numbers. - * **`model_id` (string)**: The identifier for the trained model that the alias refers to. - * **`reassign` (Optional, boolean)**: Specifies whether the alias gets reassigned to the specified trained model if it is already assigned to a different model. If the alias is already assigned and this parameter is false, the API returns an error. - - - -### put_trained_model_definition_part [_put_trained_model_definition_part] - -Create part of a trained model definition. - -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-put-trained-model-definition-part) - -```ts -client.ml.putTrainedModelDefinitionPart({ model_id, part, definition, total_definition_length, total_parts }) -``` - - -### Arguments [_arguments_302] - -* **Request (object):** - - * **`model_id` (string)**: The unique identifier of the trained model. - * **`part` (number)**: The definition part number. When the definition is loaded for inference the definition parts are streamed in the order of their part number. The first part must be `0` and the final part must be `total_parts - 1`. - * **`definition` (string)**: The definition part for the model. Must be a base64 encoded string. - * **`total_definition_length` (number)**: The total uncompressed definition length in bytes. Not base64 encoded. - * **`total_parts` (number)**: The total number of parts that will be uploaded. Must be greater than 0. - - - -### put_trained_model_vocabulary [_put_trained_model_vocabulary] - -Create a trained model vocabulary. This API is supported only for natural language processing (NLP) models. The vocabulary is stored in the index as described in `inference_config.*.vocabulary` of the trained model definition. - -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-put-trained-model-vocabulary) - -```ts -client.ml.putTrainedModelVocabulary({ model_id, vocabulary }) -``` - - -### Arguments [_arguments_303] - -* **Request (object):** - - * **`model_id` (string)**: The unique identifier of the trained model. - * **`vocabulary` (string[])**: The model vocabulary, which must not be empty. - * **`merges` (Optional, string[])**: The optional model merges if required by the tokenizer. - * **`scores` (Optional, number[])**: The optional vocabulary value scores if required by the tokenizer. - - - -### reset_job [_reset_job] - -Reset an anomaly detection job. All model state and results are deleted. The job is ready to start over as if it had just been created. It is not currently possible to reset multiple jobs using wildcards or a comma separated list. 
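As a usage sketch for the create or update trained model alias call described above: the request below points an alias at a newer model version. Both identifiers are hypothetical, and `reassign: true` is only needed when the alias already points at another model.

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'https://localhost:9200/' }) // placeholder connection

// Point the alias "sentiment-model" (aliases cannot end in numbers) at a
// hypothetical newer model; `reassign: true` moves it from the previous model.
await client.ml.putTrainedModelAlias({
  model_alias: 'sentiment-model',
  model_id: 'sentiment-model-v2',
  reassign: true
})
```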
- -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-reset-job) - -```ts -client.ml.resetJob({ job_id }) -``` - - -### Arguments [_arguments_304] - -* **Request (object):** - - * **`job_id` (string)**: The ID of the job to reset. - * **`wait_for_completion` (Optional, boolean)**: Should this request wait until the operation has completed before returning. - * **`delete_user_annotations` (Optional, boolean)**: Specifies whether annotations that have been added by the user should be deleted along with any auto-generated annotations when the job is reset. - - - -### revert_model_snapshot [_revert_model_snapshot] - -Revert to a snapshot. The machine learning features react quickly to anomalous input, learning new behaviors in data. Highly anomalous input increases the variance in the models whilst the system learns whether this is a new step-change in behavior or a one-off event. In the case where this anomalous input is known to be a one-off, then it might be appropriate to reset the model state to a time before this event. For example, you might consider reverting to a saved snapshot after Black Friday or a critical system failure. - -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-revert-model-snapshot) - -```ts -client.ml.revertModelSnapshot({ job_id, snapshot_id }) -``` - - -### Arguments [_arguments_305] - -* **Request (object):** - - * **`job_id` (string)**: Identifier for the anomaly detection job. - * **`snapshot_id` (string)**: You can specify `empty` as the . Reverting to the empty snapshot means the anomaly detection job starts learning a new model from scratch when it is started. - * **`delete_intervening_results` (Optional, boolean)**: Refer to the description for the `delete_intervening_results` query parameter. - - - -### set_upgrade_mode [_set_upgrade_mode] - -Set upgrade_mode for ML indices. Sets a cluster wide upgrade_mode setting that prepares machine learning indices for an upgrade. When upgrading your cluster, in some circumstances you must restart your nodes and reindex your machine learning indices. In those circumstances, there must be no machine learning jobs running. You can close the machine learning jobs, do the upgrade, then open all the jobs again. Alternatively, you can use this API to temporarily halt tasks associated with the jobs and datafeeds and prevent new jobs from opening. You can also use this API during upgrades that do not require you to reindex your machine learning indices, though stopping jobs is not a requirement in that case. You can see the current value for the upgrade_mode setting by using the get machine learning info API. - -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-set-upgrade-mode) - -```ts -client.ml.setUpgradeMode({ ... }) -``` - - -### Arguments [_arguments_306] - -* **Request (object):** - - * **`enabled` (Optional, boolean)**: When `true`, it enables `upgrade_mode` which temporarily halts all job and datafeed tasks and prohibits new job and datafeed tasks from starting. - * **`timeout` (Optional, string | -1 | 0)**: The time to wait for the request to be completed. - - - -### start_data_frame_analytics [_start_data_frame_analytics] - -Start a data frame analytics job. A data frame analytics job can be started and stopped multiple times throughout its lifecycle. If the destination index does not exist, it is created automatically the first time you start the data frame analytics job. 
The `index.number_of_shards` and `index.number_of_replicas` settings for the destination index are copied from the source index. If there are multiple source indices, the destination index copies the highest setting values. The mappings for the destination index are also copied from the source indices. If there are any mapping conflicts, the job fails to start. If the destination index exists, it is used as is. You can therefore set up the destination index in advance with custom settings and mappings. - -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-start-data-frame-analytics) - -```ts -client.ml.startDataFrameAnalytics({ id }) -``` - - -### Arguments [_arguments_307] - -* **Request (object):** - - * **`id` (string)**: Identifier for the data frame analytics job. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. It must start and end with alphanumeric characters. - * **`timeout` (Optional, string | -1 | 0)**: Controls the amount of time to wait until the data frame analytics job starts. - - - -### start_datafeed [_start_datafeed] - -Start datafeeds. - -A datafeed must be started in order to retrieve data from Elasticsearch. A datafeed can be started and stopped multiple times throughout its lifecycle. - -Before you can start a datafeed, the anomaly detection job must be open. Otherwise, an error occurs. - -If you restart a stopped datafeed, it continues processing input data from the next millisecond after it was stopped. If new data was indexed for that exact millisecond between stopping and starting, it will be ignored. - -When Elasticsearch security features are enabled, your datafeed remembers which roles the last user to create or update it had at the time of creation or update and runs the query using those same roles. If you provided secondary authorization headers when you created or updated the datafeed, those credentials are used instead. - -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-start-datafeed) - -```ts -client.ml.startDatafeed({ datafeed_id }) -``` - - -### Arguments [_arguments_308] - -* **Request (object):** - - * **`datafeed_id` (string)**: A numerical character string that uniquely identifies the datafeed. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. It must start and end with alphanumeric characters. - * **`end` (Optional, string | Unit)**: Refer to the description for the `end` query parameter. - * **`start` (Optional, string | Unit)**: Refer to the description for the `start` query parameter. - * **`timeout` (Optional, string | -1 | 0)**: Refer to the description for the `timeout` query parameter. - - - -### start_trained_model_deployment [_start_trained_model_deployment] - -Start a trained model deployment. It allocates the model to every machine learning node. - -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-start-trained-model-deployment) - -```ts -client.ml.startTrainedModelDeployment({ model_id }) -``` - - -### Arguments [_arguments_309] - -* **Request (object):** - - * **`model_id` (string)**: The unique identifier of the trained model. Currently, only PyTorch models are supported. - * **`cache_size` (Optional, number | string)**: The inference cache size (in memory outside the JVM heap) per node for the model. The default value is the same size as the `model_size_bytes`. 
To disable the cache, `0b` can be provided. - * **`deployment_id` (Optional, string)**: A unique identifier for the deployment of the model. - * **`number_of_allocations` (Optional, number)**: The number of model allocations on each node where the model is deployed. All allocations on a node share the same copy of the model in memory but use a separate set of threads to evaluate the model. Increasing this value generally increases the throughput. If this setting is greater than the number of hardware threads it will automatically be changed to a value less than the number of hardware threads. - * **`priority` (Optional, Enum("normal" | "low"))**: The deployment priority. - * **`queue_capacity` (Optional, number)**: Specifies the number of inference requests that are allowed in the queue. After the number of requests exceeds this value, new requests are rejected with a 429 error. - * **`threads_per_allocation` (Optional, number)**: Sets the number of threads used by each model allocation during inference. This generally increases the inference speed. The inference process is a compute-bound process; any number greater than the number of available hardware threads on the machine does not increase the inference speed. If this setting is greater than the number of hardware threads it will automatically be changed to a value less than the number of hardware threads. - * **`timeout` (Optional, string | -1 | 0)**: Specifies the amount of time to wait for the model to deploy. - * **`wait_for` (Optional, Enum("started" | "starting" | "fully_allocated"))**: Specifies the allocation status to wait for before returning. - - - -### stop_data_frame_analytics [_stop_data_frame_analytics] - -Stop data frame analytics jobs. A data frame analytics job can be started and stopped multiple times throughout its lifecycle. - -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-stop-data-frame-analytics) - -```ts -client.ml.stopDataFrameAnalytics({ id }) -``` - - -### Arguments [_arguments_310] - -* **Request (object):** - - * **`id` (string)**: Identifier for the data frame analytics job. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. It must start and end with alphanumeric characters. - * **`allow_no_match` (Optional, boolean)**: Specifies what to do when the request: - - 1. Contains wildcard expressions and there are no data frame analytics jobs that match. - 2. Contains the _all string or no identifiers and there are no matches. - 3. Contains wildcard expressions and there are only partial matches. - - -The default value is true, which returns an empty data_frame_analytics array when there are no matches and the subset of results when there are partial matches. If this parameter is false, the request returns a 404 status code when there are no matches or only partial matches. ** *`force` (Optional, boolean)**: If true, the data frame analytics job is stopped forcefully. ** *`timeout` (Optional, string | -1 | 0)**: Controls the amount of time to wait until the data frame analytics job stops. Defaults to 20 seconds. - - -### stop_datafeed [_stop_datafeed] - -Stop datafeeds. A datafeed that is stopped ceases to retrieve data from Elasticsearch. A datafeed can be started and stopped multiple times throughout its lifecycle. 
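The start and stop datafeed calls described above pair naturally. The sketch below starts a datafeed from a fixed timestamp and later stops it; the datafeed ID and timestamp are placeholders, and the associated anomaly detection job must already be open before the start call.

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'https://localhost:9200/' }) // placeholder connection

// Start the (hypothetical) datafeed from a fixed point in time.
// The associated anomaly detection job must already be open.
await client.ml.startDatafeed({
  datafeed_id: 'datafeed-my-job',
  start: '2025-01-01T00:00:00Z'
})

// Later, stop it again, waiting up to 30 seconds for it to cease.
await client.ml.stopDatafeed({
  datafeed_id: 'datafeed-my-job',
  timeout: '30s'
})
```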
- -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-stop-datafeed) - -```ts -client.ml.stopDatafeed({ datafeed_id }) -``` - - -### Arguments [_arguments_311] - -* **Request (object):** - - * **`datafeed_id` (string)**: Identifier for the datafeed. You can stop multiple datafeeds in a single API request by using a comma-separated list of datafeeds or a wildcard expression. You can close all datafeeds by using `_all` or by specifying `*` as the identifier. - * **`allow_no_match` (Optional, boolean)**: Refer to the description for the `allow_no_match` query parameter. - * **`force` (Optional, boolean)**: Refer to the description for the `force` query parameter. - * **`timeout` (Optional, string | -1 | 0)**: Refer to the description for the `timeout` query parameter. - - - -### stop_trained_model_deployment [_stop_trained_model_deployment] - -Stop a trained model deployment. - -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-stop-trained-model-deployment) - -```ts -client.ml.stopTrainedModelDeployment({ model_id }) -``` - - -### Arguments [_arguments_312] - -* **Request (object):** - - * **`model_id` (string)**: The unique identifier of the trained model. - * **`allow_no_match` (Optional, boolean)**: Specifies what to do when the request: contains wildcard expressions and there are no deployments that match; contains the `_all` string or no identifiers and there are no matches; or contains wildcard expressions and there are only partial matches. By default, it returns an empty array when there are no matches and the subset of results when there are partial matches. If `false`, the request returns a 404 status code when there are no matches or only partial matches. - * **`force` (Optional, boolean)**: Forcefully stops the deployment, even if it is used by ingest pipelines. You can’t use these pipelines until you restart the model deployment. - - - -### update_data_frame_analytics [_update_data_frame_analytics] - -Update a data frame analytics job. - -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-update-data-frame-analytics) - -```ts -client.ml.updateDataFrameAnalytics({ id }) -``` - - -### Arguments [_arguments_313] - -* **Request (object):** - - * **`id` (string)**: Identifier for the data frame analytics job. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. It must start and end with alphanumeric characters. - * **`description` (Optional, string)**: A description of the job. - * **`model_memory_limit` (Optional, string)**: The approximate maximum amount of memory resources that are permitted for analytical processing. If your `elasticsearch.yml` file contains an `xpack.ml.max_model_memory_limit` setting, an error occurs when you try to create data frame analytics jobs that have `model_memory_limit` values greater than that setting. - * **`max_num_threads` (Optional, number)**: The maximum number of threads to be used by the analysis. Using more threads may decrease the time necessary to complete the analysis at the cost of using more CPU. Note that the process may use additional threads for operational functionality other than the analysis itself. - * **`allow_lazy_start` (Optional, boolean)**: Specifies whether this job can start when there is insufficient machine learning node capacity for it to be immediately assigned to a node. 
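A brief sketch of the update data frame analytics call whose parameters are listed above; the job ID and the new limits are illustrative values only.

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'https://localhost:9200/' }) // placeholder connection

// Adjust the description and resource limits of a hypothetical existing job.
await client.ml.updateDataFrameAnalytics({
  id: 'my-dfa-job',
  description: 'reduced memory footprint',
  model_memory_limit: '256mb',
  max_num_threads: 2,
  allow_lazy_start: true
})
```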
- - - -### update_datafeed [_update_datafeed] - -Update a datafeed. You must stop and start the datafeed for the changes to be applied. When Elasticsearch security features are enabled, your datafeed remembers which roles the user who updated it had at the time of the update and runs the query using those same roles. If you provide secondary authorization headers, those credentials are used instead. - -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-update-datafeed) - -```ts -client.ml.updateDatafeed({ datafeed_id }) -``` - - -### Arguments [_arguments_314] - -* **Request (object):** - - * **`datafeed_id` (string)**: A numerical character string that uniquely identifies the datafeed. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. It must start and end with alphanumeric characters. - * **`aggregations` (Optional, Record)**: If set, the datafeed performs aggregation searches. Support for aggregations is limited and should be used only with low cardinality data. - * **`chunking_config` (Optional, { mode, time_span })**: Datafeeds might search over long time periods, for several months or years. This search is split into time chunks in order to ensure the load on Elasticsearch is managed. Chunking configuration controls how the size of these time chunks are calculated; it is an advanced configuration option. - * **`delayed_data_check_config` (Optional, { check_window, enabled })**: Specifies whether the datafeed checks for missing data and the size of the window. The datafeed can optionally search over indices that have already been read in an effort to determine whether any data has subsequently been added to the index. If missing data is found, it is a good indication that the `query_delay` is set too low and the data is being indexed after the datafeed has passed that moment in time. This check runs only on real-time datafeeds. - * **`frequency` (Optional, string | -1 | 0)**: The interval at which scheduled queries are made while the datafeed runs in real time. The default value is either the bucket span for short bucket spans, or, for longer bucket spans, a sensible fraction of the bucket span. When `frequency` is shorter than the bucket span, interim results for the last (partial) bucket are written then eventually overwritten by the full bucket results. If the datafeed uses aggregations, this value must be divisible by the interval of the date histogram aggregation. - * **`indices` (Optional, string[])**: An array of index names. Wildcards are supported. If any of the indices are in remote clusters, the machine learning nodes must have the `remote_cluster_client` role. - * **`indices_options` (Optional, { allow_no_indices, expand_wildcards, ignore_unavailable, ignore_throttled })**: Specifies index expansion options that are used during search. - * **`job_id` (Optional, string)** - * **`max_empty_searches` (Optional, number)**: If a real-time datafeed has never seen any data (including during any initial training period), it automatically stops and closes the associated job after this many real-time searches return no documents. In other words, it stops after `frequency` times `max_empty_searches` of real-time operation. If not set, a datafeed with no end time that sees no data remains started until it is explicitly stopped. By default, it is not set. 
- * **`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })**: The Elasticsearch query domain-specific language (DSL). This value corresponds to the query object in an Elasticsearch search POST body. All the options that are supported by Elasticsearch can be used, as this object is passed verbatim to Elasticsearch. Note that if you change the query, the analyzed data is also changed. Therefore, the time required to learn might be long and the understandability of the results is unpredictable. If you want to make significant changes to the source data, it is recommended that you clone the job and datafeed and make the amendments in the clone. Let both run in parallel and close one when you are satisfied with the results of the job. - * **`query_delay` (Optional, string | -1 | 0)**: The number of seconds behind real time that data is queried. For example, if data from 10:04 a.m. might not be searchable in Elasticsearch until 10:06 a.m., set this property to 120 seconds. The default value is randomly selected between `60s` and `120s`. This randomness improves the query performance when there are multiple jobs running on the same node. - * **`runtime_mappings` (Optional, Record)**: Specifies runtime fields for the datafeed search. - * **`script_fields` (Optional, Record)**: Specifies scripts that evaluate custom expressions and returns script fields to the datafeed. The detector configuration objects in a job can contain functions that use these script fields. - * **`scroll_size` (Optional, number)**: The size parameter that is used in Elasticsearch searches when the datafeed does not use aggregations. The maximum value is the value of `index.max_result_window`. - * **`allow_no_indices` (Optional, boolean)**: If `true`, wildcard indices expressions that resolve into no concrete indices are ignored. This includes the `_all` string or when no indices are specified. - * **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports a list of values. Valid values are: - -* `all`: Match any data stream or index, including hidden ones. -* `closed`: Match closed, non-hidden indices. Also matches any non-hidden data stream. Data streams cannot be closed. -* `hidden`: Match hidden data streams and hidden indices. Must be combined with `open`, `closed`, or both. -* `none`: Wildcard patterns are not accepted. -* `open`: Match open, non-hidden indices. Also matches any non-hidden data stream. - - * **`ignore_throttled` (Optional, boolean)**: If `true`, concrete, expanded or aliased indices are ignored when frozen. 
- * **`ignore_unavailable` (Optional, boolean)**: If `true`, unavailable indices (missing or closed) are ignored. - - - -### update_filter [_update_filter] - -Update a filter. Updates the description of a filter, adds items, or removes items from the list. - -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-update-filter) - -```ts -client.ml.updateFilter({ filter_id }) -``` - - -### Arguments [_arguments_315] - -* **Request (object):** - - * **`filter_id` (string)**: A string that uniquely identifies a filter. - * **`add_items` (Optional, string[])**: The items to add to the filter. - * **`description` (Optional, string)**: A description for the filter. - * **`remove_items` (Optional, string[])**: The items to remove from the filter. - - - -### update_job [_update_job] - -Update an anomaly detection job. Updates certain properties of an anomaly detection job. - -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-update-job) - -```ts -client.ml.updateJob({ job_id }) -``` - - -### Arguments [_arguments_316] - -* **Request (object):** - - * **`job_id` (string)**: Identifier for the job. - * **`allow_lazy_open` (Optional, boolean)**: Advanced configuration option. Specifies whether this job can open when there is insufficient machine learning node capacity for it to be immediately assigned to a node. If `false` and a machine learning node with capacity to run the job cannot immediately be found, the open anomaly detection jobs API returns an error. However, this is also subject to the cluster-wide `xpack.ml.max_lazy_ml_nodes` setting. If this option is set to `true`, the open anomaly detection jobs API does not return an error and the job waits in the opening state until sufficient machine learning node capacity is available. - * **`analysis_limits` (Optional, { model_memory_limit })** - * **`background_persist_interval` (Optional, string | -1 | 0)**: Advanced configuration option. The time between each periodic persistence of the model. The default value is a randomized value between 3 to 4 hours, which avoids all jobs persisting at exactly the same time. The smallest allowed value is 1 hour. For very large models (several GB), persistence could take 10-20 minutes, so do not set the value too low. If the job is open when you make the update, you must stop the datafeed, close the job, then reopen the job and restart the datafeed for the changes to take effect. - * **`custom_settings` (Optional, Record)**: Advanced configuration option. Contains custom meta data about the job. For example, it can contain custom URL information as shown in Adding custom URLs to machine learning results. - * **`categorization_filters` (Optional, string[])** - * **`description` (Optional, string)**: A description of the job. - * **`model_plot_config` (Optional, { annotations_enabled, enabled, terms })** - * **`model_prune_window` (Optional, string | -1 | 0)** - * **`daily_model_snapshot_retention_after_days` (Optional, number)**: Advanced configuration option, which affects the automatic removal of old model snapshots for this job. It specifies a period of time (in days) after which only the first snapshot per day is retained. This period is relative to the timestamp of the most recent snapshot for this job. Valid values range from 0 to `model_snapshot_retention_days`. For jobs created before version 7.8.0, the default value matches `model_snapshot_retention_days`. 
- * **`model_snapshot_retention_days` (Optional, number)**: Advanced configuration option, which affects the automatic removal of old model snapshots for this job. It specifies the maximum period of time (in days) that snapshots are retained. This period is relative to the timestamp of the most recent snapshot for this job. - * **`renormalization_window_days` (Optional, number)**: Advanced configuration option. The period over which adjustments to the score are applied, as new data is seen. - * **`results_retention_days` (Optional, number)**: Advanced configuration option. The period of time (in days) that results are retained. Age is calculated relative to the timestamp of the latest bucket result. If this property has a non-null value, once per day at 00:30 (server time), results that are the specified number of days older than the latest bucket result are deleted from Elasticsearch. The default value is null, which means all results are retained. - * **`groups` (Optional, string[])**: A list of job groups. A job can belong to no groups or many. - * **`detectors` (Optional, { detector_index, description, custom_rules }[])**: An array of detector update objects. - * **`per_partition_categorization` (Optional, { enabled, stop_on_warn })**: Settings related to how categorization interacts with partition fields. - - - -### update_model_snapshot [_update_model_snapshot] - -Update a snapshot. Updates certain properties of a snapshot. - -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-update-model-snapshot) - -```ts -client.ml.updateModelSnapshot({ job_id, snapshot_id }) -``` - - -### Arguments [_arguments_317] - -* **Request (object):** - - * **`job_id` (string)**: Identifier for the anomaly detection job. - * **`snapshot_id` (string)**: Identifier for the model snapshot. - * **`description` (Optional, string)**: A description of the model snapshot. - * **`retain` (Optional, boolean)**: If `true`, this snapshot will not be deleted during automatic cleanup of snapshots older than `model_snapshot_retention_days`. However, this snapshot will be deleted when the job is deleted. - - - -### update_trained_model_deployment [_update_trained_model_deployment] - -Update a trained model deployment. - -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-update-trained-model-deployment) - -```ts -client.ml.updateTrainedModelDeployment({ model_id }) -``` - - -### Arguments [_arguments_318] - -* **Request (object):** - - * **`model_id` (string)**: The unique identifier of the trained model. Currently, only PyTorch models are supported. - * **`number_of_allocations` (Optional, number)**: The number of model allocations on each node where the model is deployed. All allocations on a node share the same copy of the model in memory but use a separate set of threads to evaluate the model. Increasing this value generally increases the throughput. If this setting is greater than the number of hardware threads it will automatically be changed to a value less than the number of hardware threads. - - - -### upgrade_job_snapshot [_upgrade_job_snapshot] - -Upgrade a snapshot. Upgrades an anomaly detection model snapshot to the latest major version. Over time, older snapshot formats are deprecated and removed. Anomaly detection jobs support only snapshots that are from the current or previous major version. This API provides a means to upgrade a snapshot to the current major version. 
This aids in preparing the cluster for an upgrade to the next major version. Only one snapshot per anomaly detection job can be upgraded at a time and the upgraded snapshot cannot be the current snapshot of the anomaly detection job. - -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-upgrade-job-snapshot) - -```ts -client.ml.upgradeJobSnapshot({ job_id, snapshot_id }) -``` - - -### Arguments [_arguments_319] - -* **Request (object):** - - * **`job_id` (string)**: Identifier for the anomaly detection job. - * **`snapshot_id` (string)**: A numerical character string that uniquely identifies the model snapshot. - * **`wait_for_completion` (Optional, boolean)**: When true, the API won’t respond until the upgrade is complete. Otherwise, it responds as soon as the upgrade task is assigned to a node. - * **`timeout` (Optional, string | -1 | 0)**: Controls the time to wait for the request to complete. - - - -## nodes [_nodes_2] - - -### clear_repositories_metering_archive [_clear_repositories_metering_archive] - -Clear the archived repositories metering. Clear the archived repositories metering information in the cluster. - -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-nodes-clear-repositories-metering-archive) - -```ts -client.nodes.clearRepositoriesMeteringArchive({ node_id, max_archive_version }) -``` - - -### Arguments [_arguments_320] - -* **Request (object):** - - * **`node_id` (string | string[])**: List of node IDs or names used to limit returned information. - * **`max_archive_version` (number)**: Specifies the maximum `archive_version` to be cleared from the archive. - - - -### get_repositories_metering_info [_get_repositories_metering_info] - -Get cluster repositories metering. Get repositories metering information for a cluster. This API exposes monotonically non-decreasing counters and it is expected that clients would durably store the information needed to compute aggregations over a period of time. Additionally, the information exposed by this API is volatile, meaning that it will not be present after node restarts. - -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-nodes-get-repositories-metering-info) - -```ts -client.nodes.getRepositoriesMeteringInfo({ node_id }) -``` - - -### Arguments [_arguments_321] - -* **Request (object):** - - * **`node_id` (string | string[])**: List of node IDs or names used to limit returned information. All the nodes selective options are explained [here](https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-cluster). - - - -### hot_threads [_hot_threads] - -Get the hot threads for nodes. Get a breakdown of the hot threads on each selected node in the cluster. The output is plain text with a breakdown of the top hot threads for each node. - -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-nodes-hot-threads) - -```ts -client.nodes.hotThreads({ ... }) -``` - - -### Arguments [_arguments_322] - -* **Request (object):** - - * **`node_id` (Optional, string | string[])**: List of node IDs or names used to limit returned information. - * **`ignore_idle_threads` (Optional, boolean)**: If true, known idle threads (e.g. waiting in a socket select, or to get a task from an empty queue) are filtered out. - * **`interval` (Optional, string | -1 | 0)**: The interval to do the second sampling of threads. 
- * **`snapshots` (Optional, number)**: Number of samples of thread stacktrace. - * **`threads` (Optional, number)**: Specifies the number of hot threads to provide information for. - * **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. - * **`type` (Optional, Enum("cpu" | "wait" | "block" | "gpu" | "mem"))**: The type to sample. - * **`sort` (Optional, Enum("cpu" | "wait" | "block" | "gpu" | "mem"))**: The sort order for *cpu* type (default: total) - - - -### info [_info_4] - -Get node information. By default, the API returns all attributes and core settings for cluster nodes. - -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-nodes-info) - -```ts -client.nodes.info({ ... }) -``` - - -### Arguments [_arguments_323] - -* **Request (object):** - - * **`node_id` (Optional, string | string[])**: List of node IDs or names used to limit returned information. - * **`metric` (Optional, string | string[])**: Limits the information returned to the specific metrics. Supports a list, such as http,ingest. - * **`flat_settings` (Optional, boolean)**: If true, returns settings in flat format. - * **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. - - - -### reload_secure_settings [_reload_secure_settings] - -Reload the keystore on nodes in the cluster. - -Secure settings are stored in an on-disk keystore. Certain of these settings are reloadable. That is, you can change them on disk and reload them without restarting any nodes in the cluster. When you have updated reloadable secure settings in your keystore, you can use this API to reload those settings on each node. - -When the Elasticsearch keystore is password protected and not simply obfuscated, you must provide the password for the keystore when you reload the secure settings. Reloading the settings for the whole cluster assumes that the keystores for all nodes are protected with the same password; this method is allowed only when inter-node communications are encrypted. Alternatively, you can reload the secure settings on each node by locally accessing the API and passing the node-specific Elasticsearch keystore password. - -[Endpoint documentation](docs-content://deploy-manage/security/secure-settings.md) - -```ts -client.nodes.reloadSecureSettings({ ... }) -``` - - -### Arguments [_arguments_324] - -* **Request (object):** - - * **`node_id` (Optional, string | string[])**: The names of particular nodes in the cluster to target. - * **`secure_settings_password` (Optional, string)**: The password for the Elasticsearch keystore. - * **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. - - - -### stats [_stats_5] - -Get node statistics. Get statistics for nodes in a cluster. By default, all stats are returned. You can limit the returned information by using metrics. - -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-nodes-stats) - -```ts -client.nodes.stats({ ... }) -``` - - -### Arguments [_arguments_325] - -* **Request (object):** - - * **`node_id` (Optional, string | string[])**: List of node IDs or names used to limit returned information. 
- * **`metric` (Optional, string | string[])**: Limit the information returned to the specified metrics - * **`index_metric` (Optional, string | string[])**: Limit the information returned for indices metric to the specific index metrics. It can be used only if indices (or all) metric is specified. - * **`completion_fields` (Optional, string | string[])**: List or wildcard expressions of fields to include in fielddata and suggest statistics. - * **`fielddata_fields` (Optional, string | string[])**: List or wildcard expressions of fields to include in fielddata statistics. - * **`fields` (Optional, string | string[])**: List or wildcard expressions of fields to include in the statistics. - * **`groups` (Optional, boolean)**: List of search groups to include in the search statistics. - * **`include_segment_file_sizes` (Optional, boolean)**: If true, the call reports the aggregated disk usage of each one of the Lucene index files (only applies if segment stats are requested). - * **`level` (Optional, Enum("cluster" | "indices" | "shards"))**: Indicates whether statistics are aggregated at the cluster, index, or shard level. - * **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. - * **`types` (Optional, string[])**: A list of document types for the indexing index metric. - * **`include_unloaded_segments` (Optional, boolean)**: If `true`, the response includes information from segments that are not loaded into memory. - - - -### usage [_usage] - -Get feature usage information. - -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-nodes-usage) - -```ts -client.nodes.usage({ ... }) -``` - - -### Arguments [_arguments_326] - -* **Request (object):** - - * **`node_id` (Optional, string | string[])**: A list of node IDs or names to limit the returned information; use `_local` to return information from the node you’re connecting to, leave empty to get information from all nodes - * **`metric` (Optional, string | string[])**: Limits the information returned to the specific metrics. A list of the following options: `_all`, `rest_actions`. - * **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. - - - -## query_rules [_query_rules] - - -### delete_rule [_delete_rule] - -Delete a query rule. Delete a query rule within a query ruleset. This is a destructive action that is only recoverable by re-adding the same rule with the create or update query rule API. - -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-query-rules-delete-rule) - -```ts -client.queryRules.deleteRule({ ruleset_id, rule_id }) -``` - - -### Arguments [_arguments_327] - -* **Request (object):** - - * **`ruleset_id` (string)**: The unique identifier of the query ruleset containing the rule to delete - * **`rule_id` (string)**: The unique identifier of the query rule within the specified ruleset to delete - - - -### delete_ruleset [_delete_ruleset] - -Delete a query ruleset. Remove a query ruleset and its associated data. This is a destructive action that is not recoverable. 
- -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-query-rules-delete-ruleset) - -```ts -client.queryRules.deleteRuleset({ ruleset_id }) -``` - - -### Arguments [_arguments_328] - -* **Request (object):** - - * **`ruleset_id` (string)**: The unique identifier of the query ruleset to delete - - - -### get_rule [_get_rule] - -Get a query rule. Get details about a query rule within a query ruleset. - -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-query-rules-get-rule) - -```ts -client.queryRules.getRule({ ruleset_id, rule_id }) -``` - - -### Arguments [_arguments_329] - -* **Request (object):** - - * **`ruleset_id` (string)**: The unique identifier of the query ruleset containing the rule to retrieve - * **`rule_id` (string)**: The unique identifier of the query rule within the specified ruleset to retrieve - - - -### get_ruleset [_get_ruleset] - -Get a query ruleset. Get details about a query ruleset. - -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-query-rules-get-ruleset) - -```ts -client.queryRules.getRuleset({ ruleset_id }) -``` - - -### Arguments [_arguments_330] - -* **Request (object):** - - * **`ruleset_id` (string)**: The unique identifier of the query ruleset - - - -### list_rulesets [_list_rulesets] - -Get all query rulesets. Get summarized information about the query rulesets. - -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-query-rules-list-rulesets) - -```ts -client.queryRules.listRulesets({ ... }) -``` - - -### Arguments [_arguments_331] - -* **Request (object):** - - * **`from` (Optional, number)**: The offset from the first result to fetch. - * **`size` (Optional, number)**: The maximum number of results to retrieve. - - - -### put_rule [_put_rule] - -Create or update a query rule. Create or update a query rule within a query ruleset. - -::::{important} -Due to limitations within pinned queries, you can only pin documents using ids or docs, but cannot use both in single rule. It is advised to use one or the other in query rulesets, to avoid errors. Additionally, pinned queries have a maximum limit of 100 pinned hits. If multiple matching rules pin more than 100 documents, only the first 100 documents are pinned in the order they are specified in the ruleset. -:::: - - -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-query-rules-put-rule) - -```ts -client.queryRules.putRule({ ruleset_id, rule_id, type, criteria, actions }) -``` - - -### Arguments [_arguments_332] - -* **Request (object):** - - * **`ruleset_id` (string)**: The unique identifier of the query ruleset containing the rule to be created or updated. - * **`rule_id` (string)**: The unique identifier of the query rule within the specified ruleset to be created or updated. - * **`type` (Enum("pinned" | "exclude"))**: The type of rule. - * **`criteria` ({ type, metadata, values } | { type, metadata, values }[])**: The criteria that must be met for the rule to be applied. If multiple criteria are specified for a rule, all criteria must be met for the rule to be applied. - * **`actions` ({ ids, docs })**: The actions to take when the rule is matched. The format of this action depends on the rule type. - * **`priority` (Optional, number)** - - - -### put_ruleset [_put_ruleset] - -Create or update a query ruleset. There is a limit of 100 rules per ruleset. 
This limit can be increased by using the `xpack.applications.rules.max_rules_per_ruleset` cluster setting. - -::::{important} -Due to limitations within pinned queries, you can only select documents using `ids` or `docs`, but cannot use both in single rule. It is advised to use one or the other in query rulesets, to avoid errors. Additionally, pinned queries have a maximum limit of 100 pinned hits. If multiple matching rules pin more than 100 documents, only the first 100 documents are pinned in the order they are specified in the ruleset. -:::: - - -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-query-rules-put-ruleset) - -```ts -client.queryRules.putRuleset({ ruleset_id, rules }) -``` - - -### Arguments [_arguments_333] - -* **Request (object):** - - * **`ruleset_id` (string)**: The unique identifier of the query ruleset to be created or updated. - * **`rules` ({ rule_id, type, criteria, actions, priority } | { rule_id, type, criteria, actions, priority }[])** - - - -### test [_test] - -Test a query ruleset. Evaluate match criteria against a query ruleset to identify the rules that would match that criteria. - -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-query-rules-test) - -```ts -client.queryRules.test({ ruleset_id, match_criteria }) -``` - - -### Arguments [_arguments_334] - -* **Request (object):** - - * **`ruleset_id` (string)**: The unique identifier of the query ruleset to be created or updated - * **`match_criteria` (Record)**: The match criteria to apply to rules in the given query ruleset. Match criteria should match the keys defined in the `criteria.metadata` field of the rule. - - - -## rollup [_rollup] - - -### delete_job [_delete_job_2] - -Delete a rollup job. - -A job must be stopped before it can be deleted. If you attempt to delete a started job, an error occurs. Similarly, if you attempt to delete a nonexistent job, an exception occurs. - -::::{important} -When you delete a job, you remove only the process that is actively monitoring and rolling up data. The API does not delete any previously rolled up data. This is by design; a user may wish to roll up a static data set. Because the data set is static, after it has been fully rolled up there is no need to keep the indexing rollup job around (as there will be no new data). Thus the job can be deleted, leaving behind the rolled up data for analysis. If you wish to also remove the rollup data and the rollup index contains the data for only a single job, you can delete the whole rollup index. If the rollup index stores data from several jobs, you must issue a delete-by-query that targets the rollup job’s identifier in the rollup index. For example: -:::: - - -``` -POST my_rollup_index/_delete_by_query -{ - "query": { - "term": { - "_rollup.id": "the_rollup_job_id" - } - } -} -``` - -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-rollup-delete-job) - -```ts -client.rollup.deleteJob({ id }) -``` - - -### Arguments [_arguments_335] - -* **Request (object):** - - * **`id` (string)**: Identifier for the job. - - - -### get_jobs [_get_jobs_2] - -Get rollup job information. Get the configuration, stats, and status of rollup jobs. - -::::{note} -This API returns only active (both `STARTED` and `STOPPED`) jobs. If a job was created, ran for a while, then was deleted, the API does not return any details about it. 
For details about a historical rollup job, the rollup capabilities API may be more useful. -:::: - - -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-rollup-get-jobs) - -```ts -client.rollup.getJobs({ ... }) -``` - - -### Arguments [_arguments_336] - -* **Request (object):** - - * **`id` (Optional, string)**: Identifier for the rollup job. If it is `_all` or omitted, the API returns all rollup jobs. - - - -### get_rollup_caps [_get_rollup_caps] - -Get the rollup job capabilities. Get the capabilities of any rollup jobs that have been configured for a specific index or index pattern. - -This API is useful because a rollup job is often configured to rollup only a subset of fields from the source index. Furthermore, only certain aggregations can be configured for various fields, leading to a limited subset of functionality depending on that configuration. This API enables you to inspect an index and determine: - -1. Does this index have associated rollup data somewhere in the cluster? -2. If yes to the first question, what fields were rolled up, what aggregations can be performed, and where does the data live? - -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-rollup-get-rollup-caps) - -```ts -client.rollup.getRollupCaps({ ... }) -``` - - -### Arguments [_arguments_337] - -* **Request (object):** - - * **`id` (Optional, string)**: Index, indices or index-pattern to return rollup capabilities for. `_all` may be used to fetch rollup capabilities from all jobs. - - - -### get_rollup_index_caps [_get_rollup_index_caps] - -Get the rollup index capabilities. Get the rollup capabilities of all jobs inside of a rollup index. A single rollup index may store the data for multiple rollup jobs and may have a variety of capabilities depending on those jobs. This API enables you to determine: - -* What jobs are stored in an index (or indices specified via a pattern)? -* What target indices were rolled up, what fields were used in those rollups, and what aggregations can be performed on each job? - -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-rollup-get-rollup-index-caps) - -```ts -client.rollup.getRollupIndexCaps({ index }) -``` - - -### Arguments [_arguments_338] - -* **Request (object):** - - * **`index` (string | string[])**: Data stream or index to check for rollup capabilities. Wildcard (`*`) expressions are supported. - - - -### put_job [_put_job_2] - -Create a rollup job. - -::::{warning} -From 8.15.0, calling this API in a cluster with no rollup usage will fail with a message about the deprecation and planned removal of rollup features. A cluster needs to contain either a rollup job or a rollup index in order for this API to be allowed to run. -:::: - - -The rollup job configuration contains all the details about how the job should run, when it indexes documents, and what future queries will be able to run against the rollup index. - -There are three main sections to the job configuration: the logistical details about the job (for example, the cron schedule), the fields that are used for grouping, and what metrics to collect for each group. - -Jobs are created in a `STOPPED` state. You can start them with the start rollup jobs API. 
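To make those three sections concrete, the following is a minimal, hypothetical sketch of such a configuration: the logistical details (id, cron schedule, paging), the grouping fields, and the metrics to collect. Index names, field names, and connection details are placeholders, and the full parameter reference follows below.

```ts
import { Client } from '@elastic/elasticsearch'

// Placeholder connection details.
const client = new Client({ node: 'http://localhost:9200/', auth: { apiKey: 'your-api-key' } })

// Hypothetical rollup job: hourly buckets of sensor data, indexed once a day at midnight.
await client.rollup.putJob({
  id: 'sensor-rollup',            // logistics: job id, schedule, paging
  index_pattern: 'sensor-*',
  rollup_index: 'sensor_rollup',
  cron: '0 0 0 * * ?',            // Watcher-style cron: daily at midnight
  page_size: 1000,
  groups: {                       // grouping: fields available for later bucketing
    date_histogram: { field: 'timestamp', fixed_interval: '1h' },
    terms: { fields: ['node'] }
  },
  metrics: [                      // metrics: what to collect for each group
    { field: 'temperature', metrics: ['min', 'max', 'avg'] }
  ]
})

// Jobs are created stopped; start the indexer explicitly.
await client.rollup.startJob({ id: 'sensor-rollup' })
```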
- -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-rollup-put-job) - -```ts -client.rollup.putJob({ id, cron, groups, index_pattern, page_size, rollup_index }) -``` - - -### Arguments [_arguments_339] - -* **Request (object):** - - * **`id` (string)**: Identifier for the rollup job. This can be any alphanumeric string and uniquely identifies the data that is associated with the rollup job. The ID is persistent; it is stored with the rolled up data. If you create a job, let it run for a while, then delete the job, the data that the job rolled up is still associated with this job ID. You cannot create a new job with the same ID since that could lead to problems with mismatched job configurations. - * **`cron` (string)**: A cron string which defines the intervals when the rollup job should be executed. When the interval triggers, the indexer attempts to rollup the data in the index pattern. The cron pattern is unrelated to the time interval of the data being rolled up. For example, you may wish to create hourly rollups of your documents but only run the indexer on a daily basis at midnight, as defined by the cron. The cron pattern is defined just like a Watcher cron schedule. - * **`groups` ({ date_histogram, histogram, terms })**: Defines the grouping fields and aggregations that are defined for this rollup job. These fields will then be available later for aggregating into buckets. These aggs and fields can be used in any combination. Think of the groups configuration as defining a set of tools that can later be used in aggregations to partition the data. Unlike raw data, we have to think ahead to which fields and aggregations might be used. Rollups provide enough flexibility that you simply need to determine which fields are needed, not in what order they are needed. - * **`index_pattern` (string)**: The index or index pattern to roll up. Supports wildcard-style patterns (`logstash-*`). The job attempts to rollup the entire index or index-pattern. - * **`page_size` (number)**: The number of bucket results that are processed on each iteration of the rollup indexer. A larger value tends to execute faster, but requires more memory during processing. This value has no effect on how the data is rolled up; it is merely used for tweaking the speed or memory cost of the indexer. - * **`rollup_index` (string)**: The index that contains the rollup results. The index can be shared with other rollup jobs. The data is stored so that it doesn’t interfere with unrelated jobs. - * **`metrics` (Optional, { field, metrics }[])**: Defines the metrics to collect for each grouping tuple. By default, only the doc_counts are collected for each group. To make rollup useful, you will often add metrics like averages, mins, maxes, etc. Metrics are defined on a per-field basis and for each field you configure which metric should be collected. - * **`timeout` (Optional, string | -1 | 0)**: Time to wait for the request to complete. - * **`headers` (Optional, Record)** - - - -### rollup_search [_rollup_search] - -Search rolled-up data. The rollup search endpoint is needed because, internally, rolled-up documents utilize a different document structure than the original data. It rewrites standard Query DSL into a format that matches the rollup documents then takes the response and rewrites it back to what a client would expect given the original query. - -The request body supports a subset of features from the regular search API. 
The following functionality is not available: - -`size`: Because rollups work on pre-aggregated data, no search hits can be returned and so size must be set to zero or omitted entirely. `highlighter`, `suggestors`, `post_filter`, `profile`, `explain`: These are similarly disallowed. - -**Searching both historical rollup and non-rollup data** - -The rollup search API has the capability to search across both "live" non-rollup data and the aggregated rollup data. This is done by simply adding the live indices to the URI. For example: - -``` -GET sensor-1,sensor_rollup/_rollup_search -{ - "size": 0, - "aggregations": { - "max_temperature": { - "max": { - "field": "temperature" - } - } - } -} -``` - -The rollup search endpoint does two things when the search runs: - -* The original request is sent to the non-rollup index unaltered. -* A rewritten version of the original request is sent to the rollup index. - -When the two responses are received, the endpoint rewrites the rollup response and merges the two together. During the merging process, if there is any overlap in buckets between the two responses, the buckets from the non-rollup index are used. - -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-rollup-rollup-search) - -```ts -client.rollup.rollupSearch({ index }) -``` - - -### Arguments [_arguments_340] - -* **Request (object):** - - * **`index` (string | string[])**: A list of data streams and indices used to limit the request. This parameter has the following rules: - -* At least one data stream, index, or wildcard expression must be specified. This target can include a rollup or non-rollup index. For data streams, the stream’s backing indices can only serve as non-rollup indices. Omitting the parameter or using `_all` are not permitted. -* Multiple non-rollup indices may be specified. -* Only one rollup index may be specified. If more than one are supplied, an exception occurs. -* Wildcard expressions (`*`) may be used. If they match more than one rollup index, an exception occurs. However, you can use an expression to match multiple non-rollup indices or data streams. - - * **`aggregations` (Optional, Record)**: Specifies aggregations. - * **`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })**: Specifies a DSL query that is subject to some limitations. - * **`size` (Optional, number)**: Must be zero if set, as rollups work on pre-aggregated data. - * **`rest_total_hits_as_int` (Optional, boolean)**: Indicates whether hits.total should be rendered as an integer or an object in the rest search response - * **`typed_keys` (Optional, boolean)**: Specify whether aggregation and suggester names should be prefixed by their respective types in the response - - - -### start_job [_start_job] - -Start rollup jobs. If you try to start a job that does not exist, an exception occurs. 
If you try to start a job that is already started, nothing happens. - -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-rollup-start-job) - -```ts -client.rollup.startJob({ id }) -``` - - -### Arguments [_arguments_341] - -* **Request (object):** - - * **`id` (string)**: Identifier for the rollup job. - - - -### stop_job [_stop_job] - -Stop rollup jobs. If you try to stop a job that does not exist, an exception occurs. If you try to stop a job that is already stopped, nothing happens. - -Since only a stopped job can be deleted, it can be useful to block the API until the indexer has fully stopped. This is accomplished with the `wait_for_completion` query parameter, and optionally a timeout. For example: - -``` -POST _rollup/job/sensor/_stop?wait_for_completion=true&timeout=10s -``` - -The parameter blocks the API call from returning until either the job has moved to STOPPED or the specified time has elapsed. If the specified time elapses without the job moving to STOPPED, a timeout exception occurs. - -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-rollup-stop-job) - -```ts -client.rollup.stopJob({ id }) -``` - - -### Arguments [_arguments_342] - -* **Request (object):** - - * **`id` (string)**: Identifier for the rollup job. - * **`timeout` (Optional, string | -1 | 0)**: If `wait_for_completion` is `true`, the API blocks for (at maximum) the specified duration while waiting for the job to stop. If more than `timeout` time has passed, the API throws a timeout exception. NOTE: Even if a timeout occurs, the stop request is still processing and eventually moves the job to STOPPED. The timeout simply means the API call itself timed out while waiting for the status change. - * **`wait_for_completion` (Optional, boolean)**: If set to `true`, causes the API to block until the indexer state completely stops. If set to `false`, the API returns immediately and the indexer is stopped asynchronously in the background. - - - -## search_application [_search_application] - - -### delete [_delete_8] - -Delete a search application. Remove a search application and its associated alias. Indices attached to the search application are not removed. - -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-application-delete) - -```ts -client.searchApplication.delete({ name }) -``` - - -### Arguments [_arguments_343] - -* **Request (object):** - - * **`name` (string)**: The name of the search application to delete - - - -### delete_behavioral_analytics [_delete_behavioral_analytics] - -Delete a behavioral analytics collection. The associated data stream is also deleted. - -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-application-delete-behavioral-analytics) - -```ts -client.searchApplication.deleteBehavioralAnalytics({ name }) -``` - - -### Arguments [_arguments_344] - -* **Request (object):** - - * **`name` (string)**: The name of the analytics collection to be deleted - - - -### get [_get_8] - -Get search application details. 
- -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-application-get) - -```ts -client.searchApplication.get({ name }) -``` - - -### Arguments [_arguments_345] - -* **Request (object):** - - * **`name` (string)**: The name of the search application - - - -### get_behavioral_analytics [_get_behavioral_analytics] - -Get behavioral analytics collections. - -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-application-get-behavioral-analytics) - -```ts -client.searchApplication.getBehavioralAnalytics({ ... }) -``` - - -### Arguments [_arguments_346] - -* **Request (object):** - - * **`name` (Optional, string[])**: A list of analytics collections to limit the returned information - - - -### list [_list_2] - -Get search applications. Get information about search applications. - -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-application-list) - -```ts -client.searchApplication.list({ ... }) -``` - - -### Arguments [_arguments_347] - -* **Request (object):** - - * **`q` (Optional, string)**: Query in the Lucene query string syntax. - * **`from` (Optional, number)**: Starting offset. - * **`size` (Optional, number)**: Specifies a max number of results to get. - - - -### post_behavioral_analytics_event [_post_behavioral_analytics_event] - -Create a behavioral analytics collection event. - -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-application-post-behavioral-analytics-event) - -```ts -client.searchApplication.postBehavioralAnalyticsEvent({ collection_name, event_type }) -``` - - -### Arguments [_arguments_348] - -* **Request (object):** - - * **`collection_name` (string)**: The name of the behavioral analytics collection. - * **`event_type` (Enum("page_view" | "search" | "search_click"))**: The analytics event type. - * **`payload` (Optional, User-defined value)** - * **`debug` (Optional, boolean)**: Whether the response type has to include more details - - - -### put [_put_3] - -Create or update a search application. - -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-application-put) - -```ts -client.searchApplication.put({ name }) -``` - - -### Arguments [_arguments_349] - -* **Request (object):** - - * **`name` (string)**: The name of the search application to be created or updated. - * **`search_application` (Optional, { indices, analytics_collection_name, template })** - * **`create` (Optional, boolean)**: If `true`, this request cannot replace or update existing Search Applications. - - - -### put_behavioral_analytics [_put_behavioral_analytics] - -Create a behavioral analytics collection. - -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-application-put-behavioral-analytics) - -```ts -client.searchApplication.putBehavioralAnalytics({ name }) -``` - - -### Arguments [_arguments_350] - -* **Request (object):** - - * **`name` (string)**: The name of the analytics collection to be created or updated. - - - -### render_query [_render_query] - -Render a search application query. Generate an Elasticsearch query using the specified query parameters and the search template associated with the search application or a default template if none is specified. If a parameter used in the search template is not specified in `params`, the parameter’s default value will be used. 
The API returns the specific Elasticsearch query that would be generated and run by calling the search application search API. - -You must have `read` privileges on the backing alias of the search application. - -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-application-render-query) - -```ts -client.searchApplication.renderQuery({ name }) -``` - - -### Arguments [_arguments_351] - -* **Request (object):** - - * **`name` (string)**: The name of the search application to render the query for. - * **`params` (Optional, Record)** - - - -### search [_search_4] - -Run a search application search. Generate and run an Elasticsearch query that uses the specified query parameters and the search template associated with the search application or default template. Unspecified template parameters are assigned their default values if applicable. - -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-application-search) - -```ts -client.searchApplication.search({ name }) -``` - - -### Arguments [_arguments_352] - -* **Request (object):** - - * **`name` (string)**: The name of the search application to be searched. - * **`params` (Optional, Record)**: Query parameters specific to this request, which will override any defaults specified in the template. - * **`typed_keys` (Optional, boolean)**: Determines whether aggregation names are prefixed by their respective types in the response. - - - -## searchable_snapshots [_searchable_snapshots] - - -### cache_stats [_cache_stats] - -Get cache statistics. Get statistics about the shared cache for partially mounted indices. - -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-searchable-snapshots-cache-stats) - -```ts -client.searchableSnapshots.cacheStats({ ... }) -``` - - -### Arguments [_arguments_353] - -* **Request (object):** - - * **`node_id` (Optional, string | string[])**: The names of the nodes in the cluster to target. - * **`master_timeout` (Optional, string | -1 | 0)** - - - -### clear_cache [_clear_cache_2] - -Clear the cache. Clear indices and data streams from the shared cache for partially mounted indices. - -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-searchable-snapshots-clear-cache) - -```ts -client.searchableSnapshots.clearCache({ ... }) -``` - - -### Arguments [_arguments_354] - -* **Request (object):** - - * **`index` (Optional, string | string[])**: A list of data streams, indices, and aliases to clear from the cache. It supports wildcards (`*`). - * **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Whether to expand wildcard expression to concrete indices that are open, closed or both. - * **`allow_no_indices` (Optional, boolean)**: Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified) - * **`ignore_unavailable` (Optional, boolean)**: Whether specified concrete indices should be ignored when unavailable (missing or closed) - - - -### mount [_mount] - -Mount a snapshot. Mount a snapshot as a searchable snapshot index. Do not use this API for snapshots managed by index lifecycle management (ILM). Manually mounting ILM-managed snapshots can interfere with ILM processes. 
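Before the formal parameter reference below, a minimal sketch of mounting an index from a snapshot; the repository, snapshot, index names, and connection details are placeholders.

```ts
import { Client } from '@elastic/elasticsearch'

// Placeholder connection details.
const client = new Client({ node: 'http://localhost:9200/', auth: { apiKey: 'your-api-key' } })

// Mount 'my-index' from a snapshot as a searchable snapshot index under a new name.
const response = await client.searchableSnapshots.mount({
  repository: 'my-repository',
  snapshot: 'my-snapshot',
  index: 'my-index',
  renamed_index: 'my-index-mounted',
  storage: 'full_copy',        // or 'shared_cache' for a partially mounted index
  wait_for_completion: true
})
console.log(response)
```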
- -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-searchable-snapshots-mount) - -```ts -client.searchableSnapshots.mount({ repository, snapshot, index }) -``` - - -### Arguments [_arguments_355] - -* **Request (object):** - - * **`repository` (string)**: The name of the repository containing the snapshot of the index to mount. - * **`snapshot` (string)**: The name of the snapshot of the index to mount. - * **`index` (string)**: The name of the index contained in the snapshot whose data is to be mounted. If no `renamed_index` is specified, this name will also be used to create the new index. - * **`renamed_index` (Optional, string)**: The name of the index that will be created. - * **`index_settings` (Optional, Record)**: The settings that should be added to the index when it is mounted. - * **`ignore_index_settings` (Optional, string[])**: The names of settings that should be removed from the index when it is mounted. - * **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for the master node. If the master node is not available before the timeout expires, the request fails and returns an error. To indicate that the request should never timeout, set it to `-1`. - * **`wait_for_completion` (Optional, boolean)**: If true, the request blocks until the operation is complete. - * **`storage` (Optional, string)**: The mount option for the searchable snapshot index. - - - -### stats [_stats_6] - -Get searchable snapshot statistics. - -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-searchable-snapshots-stats) - -```ts -client.searchableSnapshots.stats({ ... }) -``` - - -### Arguments [_arguments_356] - -* **Request (object):** - - * **`index` (Optional, string | string[])**: A list of data streams and indices to retrieve statistics for. - * **`level` (Optional, Enum("cluster" | "indices" | "shards"))**: Return stats aggregated at cluster, index or shard level - - - -## security [_security] - - -### activate_user_profile [_activate_user_profile] - -Activate a user profile. - -Create or update a user profile on behalf of another user. - -::::{note} -The user profile feature is designed only for use by Kibana and Elastic’s Observability, Search, and Elastic Security solutions. Individual users and external applications should not call this API directly. The calling application must have either an `access_token` or a combination of `username` and `password` for the user that the profile document is intended for. Elastic reserves the right to change or remove this feature in future releases without prior notice. -:::: - - -This API creates or updates a profile document for end users with information that is extracted from the user’s authentication object including `username`, `full_name,` `roles`, and the authentication realm. For example, in the JWT `access_token` case, the profile user’s `username` is extracted from the JWT token claim pointed to by the `claims.principal` setting of the JWT realm that authenticated the token. - -When updating a profile document, the API enables the document if it was disabled. Any updates do not change existing content for either the `labels` or `data` fields. 
- -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-activate-user-profile) - -```ts -client.security.activateUserProfile({ grant_type }) -``` - - -### Arguments [_arguments_357] - -* **Request (object):** - - * **`grant_type` (Enum("password" | "access_token"))**: The type of grant. - * **`access_token` (Optional, string)**: The user’s Elasticsearch access token or JWT. Both `access` and `id` JWT token types are supported and they depend on the underlying JWT realm configuration. If you specify the `access_token` grant type, this parameter is required. It is not valid with other grant types. - * **`password` (Optional, string)**: The user’s password. If you specify the `password` grant type, this parameter is required. It is not valid with other grant types. - * **`username` (Optional, string)**: The username that identifies the user. If you specify the `password` grant type, this parameter is required. It is not valid with other grant types. - - - -### authenticate [_authenticate] - -Authenticate a user. - -Authenticates a user and returns information about the authenticated user. Include the user information in a [basic auth header](https://en.wikipedia.org/wiki/Basic_access_authentication). A successful call returns a JSON structure that shows user information such as their username, the roles that are assigned to the user, any assigned metadata, and information about the realms that authenticated and authorized the user. If the user cannot be authenticated, this API returns a 401 status code. - -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-authenticate) - -```ts -client.security.authenticate() -``` - - -### bulk_delete_role [_bulk_delete_role] - -Bulk delete roles. - -The role management APIs are generally the preferred way to manage roles, rather than using file-based role management. The bulk delete roles API cannot delete roles that are defined in roles files. - -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-bulk-delete-role) - -```ts -client.security.bulkDeleteRole({ names }) -``` - - -### Arguments [_arguments_358] - -* **Request (object):** - - * **`names` (string[])**: An array of role names to delete - * **`refresh` (Optional, Enum(true | false | "wait_for"))**: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. - - - -### bulk_put_role [_bulk_put_role] - -Bulk create or update roles. - -The role management APIs are generally the preferred way to manage roles, rather than using file-based role management. The bulk create or update roles API cannot update roles that are defined in roles files. - -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-bulk-put-role) - -```ts -client.security.bulkPutRole({ roles }) -``` - - -### Arguments [_arguments_359] - -* **Request (object):** - - * **`roles` (Record)**: A dictionary of role name to RoleDescriptor objects to add or update - * **`refresh` (Optional, Enum(true | false | "wait_for"))**: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. 
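A minimal sketch of a bulk role update follows; the role names, index patterns, and privileges are illustrative only, and the connection details are placeholders.

```ts
import { Client } from '@elastic/elasticsearch'

// Placeholder connection details.
const client = new Client({ node: 'http://localhost:9200/', auth: { apiKey: 'your-api-key' } })

// Create or update two roles in a single request.
const response = await client.security.bulkPutRole({
  roles: {
    'logs-reader': {
      indices: [{ names: ['logs-*'], privileges: ['read', 'view_index_metadata'] }]
    },
    'metrics-writer': {
      cluster: ['monitor'],
      indices: [{ names: ['metrics-*'], privileges: ['create_doc', 'create_index'] }]
    }
  },
  refresh: 'wait_for'
})
// The response indicates which roles were created or updated and reports any errors.
console.log(response)
```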
- - - -### bulk_update_api_keys [_bulk_update_api_keys] - -Bulk update API keys. Update the attributes for multiple API keys. - -::::{important} -It is not possible to use an API key as the authentication credential for this API. To update API keys, the owner user’s credentials are required. -:::: - - -This API is similar to the update API key API but enables you to apply the same update to multiple API keys in one API call. This operation can greatly improve performance over making individual updates. - -It is not possible to update expired or invalidated API keys. - -This API supports updates to API key access scope, metadata and expiration. The access scope of each API key is derived from the `role_descriptors` you specify in the request and a snapshot of the owner user’s permissions at the time of the request. The snapshot of the owner’s permissions is updated automatically on every call. - -::::{important} -If you don’t specify `role_descriptors` in the request, a call to this API might still change an API key’s access scope. This change can occur if the owner user’s permissions have changed since the API key was created or last modified. -:::: - - -A successful request returns a JSON structure that contains the IDs of all updated API keys, the IDs of API keys that already had the requested changes and did not require an update, and error details for any failed update. - -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-bulk-update-api-keys) - -```ts -client.security.bulkUpdateApiKeys({ ids }) -``` - - -### Arguments [_arguments_360] - -* **Request (object):** - - * **`ids` (string | string[])**: The API key identifiers. - * **`expiration` (Optional, string | -1 | 0)**: Expiration time for the API keys. By default, API keys never expire. This property can be omitted to leave the value unchanged. - * **`metadata` (Optional, Record)**: Arbitrary nested metadata to associate with the API keys. Within the `metadata` object, top-level keys beginning with an underscore (`_`) are reserved for system usage. Any information specified with this parameter fully replaces metadata previously associated with the API key. - * **`role_descriptors` (Optional, Record)**: The role descriptors to assign to the API keys. An API key’s effective permissions are an intersection of its assigned privileges and the point-in-time snapshot of permissions of the owner user. You can assign new privileges by specifying them in this parameter. To remove assigned privileges, supply the `role_descriptors` parameter as an empty object `{}`. If an API key has no assigned privileges, it inherits the owner user’s full permissions. The snapshot of the owner’s permissions is always updated, whether you supply the `role_descriptors` parameter. The structure of a role descriptor is the same as the request for the create API keys API. - - - -### change_password [_change_password] - -Change passwords. - -Change the passwords of users in the native realm and built-in users. - -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-change-password) - -```ts -client.security.changePassword({ ... }) -``` - - -### Arguments [_arguments_361] - -* **Request (object):** - - * **`username` (Optional, string)**: The user whose password you want to change. If you do not specify this parameter, the password is changed for the current user. - * **`password` (Optional, string)**: The new password value. 
Passwords must be at least 6 characters long. - * **`password_hash` (Optional, string)**: A hash of the new password value. This must be produced using the same hashing algorithm as has been configured for password storage. For more details, see the explanation of the `xpack.security.authc.password_hashing.algorithm` setting. - * **`refresh` (Optional, Enum(true | false | "wait_for"))**: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. - - - -### clear_api_key_cache [_clear_api_key_cache] - -Clear the API key cache. - -Evict a subset of all entries from the API key cache. The cache is also automatically cleared on state changes of the security index. - -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-clear-api-key-cache) - -```ts -client.security.clearApiKeyCache({ ids }) -``` - - -### Arguments [_arguments_362] - -* **Request (object):** - - * **`ids` (string | string[])**: List of API key IDs to evict from the API key cache. To evict all API keys, use `*`. Does not support other wildcard patterns. - - - -### clear_cached_privileges [_clear_cached_privileges] - -Clear the privileges cache. - -Evict privileges from the native application privilege cache. The cache is also automatically cleared for applications that have their privileges updated. - -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-clear-cached-privileges) - -```ts -client.security.clearCachedPrivileges({ application }) -``` - - -### Arguments [_arguments_363] - -* **Request (object):** - - * **`application` (string)**: A list of applications. To clear all applications, use an asterisk (`*`). It does not support other wildcard patterns. - - - -### clear_cached_realms [_clear_cached_realms] - -Clear the user cache. - -Evict users from the user cache. You can completely clear the cache or evict specific users. - -User credentials are cached in memory on each node to avoid connecting to a remote authentication service or hitting the disk for every incoming request. There are realm settings that you can use to configure the user cache. For more information, refer to the documentation about controlling the user cache. - -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-clear-cached-realms) - -```ts -client.security.clearCachedRealms({ realms }) -``` - - -### Arguments [_arguments_364] - -* **Request (object):** - - * **`realms` (string | string[])**: A list of realms. To clear all realms, use an asterisk (`*`). It does not support other wildcard patterns. - * **`usernames` (Optional, string[])**: A list of the users to clear from the cache. If you do not specify this parameter, the API evicts all users from the user cache. - - - -### clear_cached_roles [_clear_cached_roles] - -Clear the roles cache. - -Evict roles from the native role cache. - -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-clear-cached-roles) - -```ts -client.security.clearCachedRoles({ name }) -``` - - -### Arguments [_arguments_365] - -* **Request (object):** - - * **`name` (string | string[])**: A list of roles to evict from the role cache. To evict all roles, use an asterisk (`*`). It does not support other wildcard patterns.
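As a brief sketch of the cache-eviction calls above, assuming `client` is a configured Client instance; the realm, user, and role names are hypothetical:

```ts
// Evict every user cached for one realm, and only a single user from another.
await client.security.clearCachedRealms({ realms: 'ldap1' })
await client.security.clearCachedRealms({ realms: 'native1', usernames: ['jdoe'] })

// Evict one role from the native role cache; use '*' to evict all roles.
await client.security.clearCachedRoles({ name: 'logs-reader' })
```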
- - - -### clear_cached_service_tokens [_clear_cached_service_tokens] - -Clear service account token caches. - -Evict a subset of all entries from the service account token caches. Two separate caches exist for service account tokens: one cache for tokens backed by the `service_tokens` file, and another for tokens backed by the `.security` index. This API clears matching entries from both caches. - -The cache for service account tokens backed by the `.security` index is cleared automatically on state changes of the security index. The cache for tokens backed by the `service_tokens` file is cleared automatically on file changes. - -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-clear-cached-service-tokens) - -```ts -client.security.clearCachedServiceTokens({ namespace, service, name }) -``` - - -### Arguments [_arguments_366] - -* **Request (object):** - - * **`namespace` (string)**: The namespace, which is a top-level grouping of service accounts. - * **`service` (string)**: The name of the service, which must be unique within its namespace. - * **`name` (string | string[])**: A list of token names to evict from the service account token caches. Use a wildcard (`*`) to evict all tokens that belong to a service account. It does not support other wildcard patterns. - - - -### create_api_key [_create_api_key] - -Create an API key. - -Create an API key for access without requiring basic authentication. - -::::{important} -If the credential that is used to authenticate this request is an API key, the derived API key cannot have any privileges. If you specify privileges, the API returns an error. -:::: - - -A successful request returns a JSON structure that contains the API key, its unique id, and its name. If applicable, it also returns expiration information for the API key in milliseconds. - -::::{note} -By default, API keys never expire. You can specify expiration information when you create the API keys. -:::: - - -The API keys are created by the Elasticsearch API key service, which is automatically enabled. To configure or turn off the API key service, refer to API key service setting documentation. - -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-create-api-key) - -```ts -client.security.createApiKey({ ... }) -``` - - -### Arguments [_arguments_367] - -* **Request (object):** - - * **`expiration` (Optional, string | -1 | 0)**: The expiration time for the API key. By default, API keys never expire. - * **`name` (Optional, string)**: A name for the API key. - * **`role_descriptors` (Optional, Record)**: An array of role descriptors for this API key. When it is not specified or it is an empty array, the API key will have a point in time snapshot of permissions of the authenticated user. If you supply role descriptors, the resultant permissions are an intersection of API keys permissions and the authenticated user’s permissions thereby limiting the access scope for API keys. The structure of role descriptor is the same as the request for the create role API. For more details, refer to the create or update roles API. - - -::::{note} -Due to the way in which this permission intersection is calculated, it is not possible to create an API key that is a child of another API key, unless the derived key is created without any privileges. In this case, you must explicitly specify a role descriptor with no privileges. 
The derived API key can be used for authentication; it will not have authority to call Elasticsearch APIs. ** *`metadata` (Optional, Record)**: Arbitrary metadata that you want to associate with the API key. It supports nested data structure. Within the metadata object, keys beginning with `_` are reserved for system usage. ** *`refresh` (Optional, Enum(true | false | "wait_for"))**: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. -:::: - - - -### create_cross_cluster_api_key [_create_cross_cluster_api_key] - -Create a cross-cluster API key. - -Create an API key of the `cross_cluster` type for the API key based remote cluster access. A `cross_cluster` API key cannot be used to authenticate through the REST interface. - -::::{important} -To authenticate this request you must use a credential that is not an API key. Even if you use an API key that has the required privilege, the API returns an error. -:::: - - -Cross-cluster API keys are created by the Elasticsearch API key service, which is automatically enabled. - -::::{note} -Unlike REST API keys, a cross-cluster API key does not capture permissions of the authenticated user. The API key’s effective permission is exactly as specified with the `access` property. -:::: - - -A successful request returns a JSON structure that contains the API key, its unique ID, and its name. If applicable, it also returns expiration information for the API key in milliseconds. - -By default, API keys never expire. You can specify expiration information when you create the API keys. - -Cross-cluster API keys can only be updated with the update cross-cluster API key API. Attempting to update them with the update REST API key API or the bulk update REST API keys API will result in an error. - -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-create-cross-cluster-api-key) - -```ts -client.security.createCrossClusterApiKey({ access, name }) -``` - - -### Arguments [_arguments_368] - -* **Request (object):** - - * **`access` ({ replication, search })**: The access to be granted to this API key. The access is composed of permissions for cross-cluster search and cross-cluster replication. At least one of them must be specified. - - -::::{note} -No explicit privileges should be specified for either search or replication access. The creation process automatically converts the access specification to a role descriptor which has relevant privileges assigned accordingly. ** *`name` (string)**: Specifies the name for this API key. *** *`expiration` (Optional, string | -1 | 0)**: Expiration time for the API key. By default, API keys never expire. ** *`metadata` (Optional, Record)**: Arbitrary metadata that you want to associate with the API key. It supports nested data structure. Within the metadata object, keys beginning with `_` are reserved for system usage. -:::: - - - -### create_service_token [_create_service_token] - -Create a service account token. - -Create a service accounts token for access without requiring basic authentication. - -::::{note} -Service account tokens never expire. You must actively delete them if they are no longer needed. 
-:::: - - -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-create-service-token) - -```ts -client.security.createServiceToken({ namespace, service }) -``` - - -### Arguments [_arguments_369] - -* **Request (object):** - - * **`namespace` (string)**: The name of the namespace, which is a top-level grouping of service accounts. - * **`service` (string)**: The name of the service. - * **`name` (Optional, string)**: The name for the service account token. If omitted, a random name will be generated. - - -Token names must be at least one and no more than 256 characters. They can contain alphanumeric characters (a-z, A-Z, 0-9), dashes (`-`), and underscores (`_`), but cannot begin with an underscore. - -::::{note} -Token names must be unique in the context of the associated service account. They must also be globally unique with their fully qualified names, which are comprised of the service account principal and token name, such as `//`. -:::: - - * **`refresh` (Optional, Enum(true | false | "wait_for"))**: If `true` then refresh the affected shards to make this operation visible to search, if `wait_for` (the default) then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. - - - -### delegate_pki [_delegate_pki] - -Delegate PKI authentication. - -This API implements the exchange of an X509Certificate chain for an Elasticsearch access token. The certificate chain is validated, according to RFC 5280, by sequentially considering the trust configuration of every installed PKI realm that has `delegation.enabled` set to `true`. A successfully trusted client certificate is also subject to the validation of the subject distinguished name according to the `username_pattern` of the respective realm. - -This API is called by smart and trusted proxies, such as Kibana, which terminate the user’s TLS session but still want to authenticate the user by using a PKI realm—as if the user connected directly to Elasticsearch. - -::::{important} -The association between the subject public key in the target certificate and the corresponding private key is not validated. This is part of the TLS authentication process and it is delegated to the proxy that calls this API. The proxy is trusted to have performed the TLS authentication and this API translates that authentication into an Elasticsearch access token. -:::: - - -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-delegate-pki) - -```ts -client.security.delegatePki({ x509_certificate_chain }) -``` - - -### Arguments [_arguments_370] - -* **Request (object):** - - * **`x509_certificate_chain` (string[])**: The X509Certificate chain, which is represented as an ordered string array. Each string in the array is a base64-encoded representation (Section 4 of RFC 4648, not base64url-encoded) of the certificate’s DER encoding. - - -The first element is the target certificate that contains the subject distinguished name that is requesting access. This may be followed by additional certificates; each subsequent certificate is used to certify the previous one. - - -### delete_privileges [_delete_privileges] - -Delete application privileges. - -To use this API, you must have one of the following privileges: - -* The `manage_security` cluster privilege (or a greater privilege such as `all`). -* The "Manage Application Privileges" global privilege for the application being referenced in the request.
- -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-delete-privileges) - -```ts -client.security.deletePrivileges({ application, name }) -``` - - -### Arguments [_arguments_371] - -* **Request (object):** - - * **`application` (string)**: The name of the application. Application privileges are always associated with exactly one application. - * **`name` (string | string[])**: The name of the privilege. - * **`refresh` (Optional, Enum(true | false | "wait_for"))**: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. - - - -### delete_role [_delete_role] - -Delete roles. - -Delete roles in the native realm. The role management APIs are generally the preferred way to manage roles, rather than using file-based role management. The delete roles API cannot remove roles that are defined in roles files. - -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-delete-role) - -```ts -client.security.deleteRole({ name }) -``` - - -### Arguments [_arguments_372] - -* **Request (object):** - - * **`name` (string)**: The name of the role. - * **`refresh` (Optional, Enum(true | false | "wait_for"))**: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. - - - -### delete_role_mapping [_delete_role_mapping] - -Delete role mappings. - -Role mappings define which roles are assigned to each user. The role mapping APIs are generally the preferred way to manage role mappings rather than using role mapping files. The delete role mappings API cannot remove role mappings that are defined in role mapping files. - -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-delete-role-mapping) - -```ts -client.security.deleteRoleMapping({ name }) -``` - - -### Arguments [_arguments_373] - -* **Request (object):** - - * **`name` (string)**: The distinct name that identifies the role mapping. The name is used solely as an identifier to facilitate interaction via the API; it does not affect the behavior of the mapping in any way. - * **`refresh` (Optional, Enum(true | false | "wait_for"))**: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. - - - -### delete_service_token [_delete_service_token] - -Delete service account tokens. - -Delete service account tokens for a service in a specified namespace. - -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-delete-service-token) - -```ts -client.security.deleteServiceToken({ namespace, service, name }) -``` - - -### Arguments [_arguments_374] - -* **Request (object):** - - * **`namespace` (string)**: The namespace, which is a top-level grouping of service accounts. - * **`service` (string)**: The service name. - * **`name` (string)**: The name of the service account token. 
- * **`refresh` (Optional, Enum(true | false | "wait_for"))**: If `true` then refresh the affected shards to make this operation visible to search, if `wait_for` (the default) then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. - - - -### delete_user [_delete_user] - -Delete users. - -Delete users from the native realm. - -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-delete-user) - -```ts -client.security.deleteUser({ username }) -``` - - -### Arguments [_arguments_375] - -* **Request (object):** - - * **`username` (string)**: An identifier for the user. - * **`refresh` (Optional, Enum(true | false | "wait_for"))**: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. - - - -### disable_user [_disable_user] - -Disable users. - -Disable users in the native realm. By default, when you create users, they are enabled. You can use this API to revoke a user’s access to Elasticsearch. - -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-disable-user) - -```ts -client.security.disableUser({ username }) -``` - - -### Arguments [_arguments_376] - -* **Request (object):** - - * **`username` (string)**: An identifier for the user. - * **`refresh` (Optional, Enum(true | false | "wait_for"))**: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. - - - -### disable_user_profile [_disable_user_profile] - -Disable a user profile. - -Disable user profiles so that they are not visible in user profile searches. - -::::{note} -The user profile feature is designed only for use by Kibana and Elastic’s Observability, Search and Elastic Security solutions. Individual users and external applications should not call this API directly. Elastic reserves the right to change or remove this feature in future releases without prior notice. -:::: - - -When you activate a user profile, it’s automatically enabled and visible in user profile searches. You can use the disable user profile API to disable a user profile so it’s not visible in these searches. To re-enable a disabled user profile, use the enable user profile API. - -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-disable-user-profile) - -```ts -client.security.disableUserProfile({ uid }) -``` - - -### Arguments [_arguments_377] - -* **Request (object):** - - * **`uid` (string)**: Unique identifier for the user profile. - * **`refresh` (Optional, Enum(true | false | "wait_for"))**: If *true*, Elasticsearch refreshes the affected shards to make this operation visible to search. If *wait_for*, it waits for a refresh to make this operation visible to search. If *false*, it does nothing with refreshes. - - - -### enable_user [_enable_user] - -Enable users. - -Enable users in the native realm. By default, when you create users, they are enabled.
- -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-enable-user) - -```ts -client.security.enableUser({ username }) -``` - - -### Arguments [_arguments_378] - -* **Request (object):** - - * **`username` (string)**: An identifier for the user. - * **`refresh` (Optional, Enum(true | false | "wait_for"))**: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. - - - -### enable_user_profile [_enable_user_profile] - -Enable a user profile. - -Enable user profiles to make them visible in user profile searches. - -::::{note} -The user profile feature is designed only for use by Kibana and Elastic’s Observability, Search and Elastic Security solutions. Individual users and external applications should not call this API directly. Elastic reserves the right to change or remove this feature in future releases without prior notice. -:::: - - -When you activate a user profile, it’s automatically enabled and visible in user profile searches. If you later disable the user profile, you can use the enable user profile API to make the profile visible in these searches again. - -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-enable-user-profile) - -```ts -client.security.enableUserProfile({ uid }) -``` - - -### Arguments [_arguments_379] - -* **Request (object):** - - * **`uid` (string)**: A unique identifier for the user profile. - * **`refresh` (Optional, Enum(true | false | "wait_for"))**: If *true*, Elasticsearch refreshes the affected shards to make this operation visible to search. If *wait_for*, it waits for a refresh to make this operation visible to search. If *false*, nothing is done with refreshes. - - - -### enroll_kibana [_enroll_kibana] - -Enroll Kibana. - -Enable a Kibana instance to configure itself for communication with a secured Elasticsearch cluster. - -::::{note} -This API is currently intended for internal use only by Kibana. Kibana uses this API internally to configure itself for communications with an Elasticsearch cluster that already has security features enabled. -:::: - - -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-enroll-kibana) - -```ts -client.security.enrollKibana() -``` - - -### enroll_node [_enroll_node] - -Enroll a node. - -Enroll a new node to allow it to join an existing cluster with security features enabled. - -The response contains all the necessary information for the joining node to bootstrap discovery and security related settings so that it can successfully join the cluster. The response contains key and certificate material that allows the caller to generate valid signed certificates for the HTTP layer of all nodes in the cluster. - -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-enroll-node) - -```ts -client.security.enrollNode() -``` - - -### get_api_key [_get_api_key] - -Get API key information. - -Retrieves information for one or more API keys. NOTE: If you have only the `manage_own_api_key` privilege, this API returns only the API keys that you own. If you have `read_security`, `manage_api_key` or greater privileges (including `manage_security`), this API returns all API keys regardless of ownership. 
- -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-get-api-key) - -```ts -client.security.getApiKey({ ... }) -``` - - -### Arguments [_arguments_380] - -* **Request (object):** - - * **`id` (Optional, string)**: An API key id. This parameter cannot be used with any of `name`, `realm_name` or `username`. - * **`name` (Optional, string)**: An API key name. This parameter cannot be used with any of `id`, `realm_name` or `username`. It supports prefix search with wildcard. - * **`owner` (Optional, boolean)**: A boolean flag that can be used to query API keys owned by the currently authenticated user. The `realm_name` or `username` parameters cannot be specified when this parameter is set to `true` as they are assumed to be the currently authenticated ones. - * **`realm_name` (Optional, string)**: The name of an authentication realm. This parameter cannot be used with either `id` or `name` or when `owner` flag is set to `true`. - * **`username` (Optional, string)**: The username of a user. This parameter cannot be used with either `id` or `name` or when `owner` flag is set to `true`. - * **`with_limited_by` (Optional, boolean)**: Return the snapshot of the owner user’s role descriptors associated with the API key. An API key’s actual permission is the intersection of its assigned role descriptors and the owner user’s role descriptors. - * **`active_only` (Optional, boolean)**: A boolean flag that can be used to query API keys that are currently active. An API key is considered active if it is neither invalidated, nor expired at query time. You can specify this together with other parameters such as `owner` or `name`. If `active_only` is false, the response will include both active and inactive (expired or invalidated) keys. - * **`with_profile_uid` (Optional, boolean)**: Determines whether to also retrieve the profile uid, for the API key owner principal, if it exists. - - - -### get_builtin_privileges [_get_builtin_privileges] - -Get builtin privileges. - -Get the list of cluster privileges and index privileges that are available in this version of Elasticsearch. - -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-get-builtin-privileges) - -```ts -client.security.getBuiltinPrivileges() -``` - - -### get_privileges [_get_privileges] - -Get application privileges. - -To use this API, you must have one of the following privileges: - -* The `read_security` cluster privilege (or a greater privilege such as `manage_security` or `all`). -* The "Manage Application Privileges" global privilege for the application being referenced in the request. - -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-get-privileges) - -```ts -client.security.getPrivileges({ ... }) -``` - - -### Arguments [_arguments_381] - -* **Request (object):** - - * **`application` (Optional, string)**: The name of the application. Application privileges are always associated with exactly one application. If you do not specify this parameter, the API returns information about all privileges for all applications. - * **`name` (Optional, string | string[])**: The name of the privilege. If you do not specify this parameter, the API returns information about all privileges for the requested application. - - - -### get_role [_get_role] - -Get roles. - -Get roles in the native realm. 
The role management APIs are generally the preferred way to manage roles, rather than using file-based role management. The get roles API cannot retrieve roles that are defined in roles files. - -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-get-role) - -```ts -client.security.getRole({ ... }) -``` - - -### Arguments [_arguments_382] - -* **Request (object):** - - * **`name` (Optional, string | string[])**: The name of the role. You can specify multiple roles as a list. If you do not specify this parameter, the API returns information about all roles. - - - -### get_role_mapping [_get_role_mapping] - -Get role mappings. - -Role mappings define which roles are assigned to each user. The role mapping APIs are generally the preferred way to manage role mappings rather than using role mapping files. The get role mappings API cannot retrieve role mappings that are defined in role mapping files. - -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-get-role-mapping) - -```ts -client.security.getRoleMapping({ ... }) -``` - - -### Arguments [_arguments_383] - -* **Request (object):** - - * **`name` (Optional, string | string[])**: The distinct name that identifies the role mapping. The name is used solely as an identifier to facilitate interaction via the API; it does not affect the behavior of the mapping in any way. You can specify multiple mapping names as a list. If you do not specify this parameter, the API returns information about all role mappings. - - - -### get_service_accounts [_get_service_accounts] - -Get service accounts. - -Get a list of service accounts that match the provided path parameters. - -::::{note} -Currently, only the `elastic/fleet-server` service account is available. -:::: - - -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-get-service-accounts) - -```ts -client.security.getServiceAccounts({ ... }) -``` - - -### Arguments [_arguments_384] - -* **Request (object):** - - * **`namespace` (Optional, string)**: The name of the namespace. Omit this parameter to retrieve information about all service accounts. If you omit this parameter, you must also omit the `service` parameter. - * **`service` (Optional, string)**: The service name. Omit this parameter to retrieve information about all service accounts that belong to the specified `namespace`. - - - -### get_service_credentials [_get_service_credentials] - -Get service account credentials. - -To use this API, you must have at least the `read_security` cluster privilege (or a greater privilege such as `manage_service_account` or `manage_security`). - -The response includes service account tokens that were created with the create service account tokens API as well as file-backed tokens from all nodes of the cluster. - -::::{note} -For tokens backed by the `service_tokens` file, the API collects them from all nodes of the cluster. Tokens with the same name from different nodes are assumed to be the same token and are only counted once towards the total number of service tokens. -:::: - - -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-get-service-credentials) - -```ts -client.security.getServiceCredentials({ namespace, service }) -``` - - -### Arguments [_arguments_385] - -* **Request (object):** - - * **`namespace` (string)**: The name of the namespace. - * **`service` (string)**: The service name. 
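A minimal sketch tying the service account APIs together, assuming `client` is a configured Client instance and using the `elastic/fleet-server` service account mentioned above; the token name is hypothetical:

```ts
// Create an index-backed token for the fleet-server service account.
const created = await client.security.createServiceToken({
  namespace: 'elastic',
  service: 'fleet-server',
  name: 'my-fleet-token'
})
console.log(created.token) // the generated token name and value

// List all tokens (index- and file-backed) known for the same service account.
const credentials = await client.security.getServiceCredentials({
  namespace: 'elastic',
  service: 'fleet-server'
})
console.log(credentials)
```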
- - - -### get_settings [_get_settings_3] - -Get security index settings. - -Get the user-configurable settings for the security internal index (`.security` and associated indices). Only a subset of the index settings — those that are user-configurable—will be shown. This includes: - -* `index.auto_expand_replicas` -* `index.number_of_replicas` - -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-get-settings) - -```ts -client.security.getSettings({ ... }) -``` - - -### Arguments [_arguments_386] - -* **Request (object):** - - * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. - - - -### get_token [_get_token] - -Get a token. - -Create a bearer token for access without requiring basic authentication. The tokens are created by the Elasticsearch Token Service, which is automatically enabled when you configure TLS on the HTTP interface. Alternatively, you can explicitly enable the `xpack.security.authc.token.enabled` setting. When you are running in production mode, a bootstrap check prevents you from enabling the token service unless you also enable TLS on the HTTP interface. - -The get token API takes the same parameters as a typical OAuth 2.0 token API except for the use of a JSON request body. - -A successful get token API call returns a JSON structure that contains the access token, the amount of time (seconds) that the token expires in, the type, and the scope if available. - -The tokens returned by the get token API have a finite period of time for which they are valid and after that time period, they can no longer be used. That time period is defined by the `xpack.security.authc.token.timeout` setting. If you want to invalidate a token immediately, you can do so by using the invalidate token API. - -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-get-token) - -```ts -client.security.getToken({ ... }) -``` - - -### Arguments [_arguments_387] - -* **Request (object):** - - * **`grant_type` (Optional, Enum("password" | "client_credentials" | "_kerberos" | "refresh_token"))**: The type of grant. Supported grant types are: `password`, `_kerberos`, `client_credentials`, and `refresh_token`. - * **`scope` (Optional, string)**: The scope of the token. Currently tokens are only issued for a scope of FULL regardless of the value sent with the request. - * **`password` (Optional, string)**: The user’s password. If you specify the `password` grant type, this parameter is required. This parameter is not valid with any other supported grant type. - * **`kerberos_ticket` (Optional, string)**: The base64 encoded kerberos ticket. If you specify the `_kerberos` grant type, this parameter is required. This parameter is not valid with any other supported grant type. - * **`refresh_token` (Optional, string)**: The string that was returned when you created the token, which enables you to extend its life. If you specify the `refresh_token` grant type, this parameter is required. This parameter is not valid with any other supported grant type. - * **`username` (Optional, string)**: The username that identifies the user. If you specify the `password` grant type, this parameter is required. This parameter is not valid with any other supported grant type. - - - -### get_user [_get_user] - -Get users. 
- -Get information about users in the native realm and built-in users. - -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-get-user) - -```ts -client.security.getUser({ ... }) -``` - - -### Arguments [_arguments_388] - -* **Request (object):** - - * **`username` (Optional, string | string[])**: An identifier for the user. You can specify multiple usernames as a list. If you omit this parameter, the API retrieves information about all users. - * **`with_profile_uid` (Optional, boolean)**: Determines whether to retrieve the user profile UID, if it exists, for the users. - - - -### get_user_privileges [_get_user_privileges] - -Get user privileges. - -Get the security privileges for the logged in user. All users can use this API, but only to determine their own privileges. To check the privileges of other users, you must use the run as feature. To check whether a user has a specific list of privileges, use the has privileges API. - -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-get-user-privileges) - -```ts -client.security.getUserPrivileges({ ... }) -``` - - -### Arguments [_arguments_389] - -* **Request (object):** - - * **`application` (Optional, string)**: The name of the application. Application privileges are always associated with exactly one application. If you do not specify this parameter, the API returns information about all privileges for all applications. - * **`priviledge` (Optional, string)**: The name of the privilege. If you do not specify this parameter, the API returns information about all privileges for the requested application. - * **`username` (Optional, string | null)** - - - -### get_user_profile [_get_user_profile] - -Get a user profile. - -Get a user’s profile using the unique profile ID. - -::::{note} -The user profile feature is designed only for use by Kibana and Elastic’s Observability, Search and Elastic Security solutions. Individual users and external applications should not call this API directly. Elastic reserves the right to change or remove this feature in future releases without prior notice. -:::: - - -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-get-user-profile) - -```ts -client.security.getUserProfile({ uid }) -``` - - -### Arguments [_arguments_390] - -* **Request (object):** - - * **`uid` (string | string[])**: A unique identifier for the user profile. - * **`data` (Optional, string | string[])**: A list of filters for the `data` field of the profile document. To return all content use `data=*`. To return a subset of content use `data=` to retrieve content nested under the specified ``. By default returns no `data` content. - - - -### grant_api_key [_grant_api_key] - -Grant an API key. - -Create an API key on behalf of another user. This API is similar to the create API keys API, however it creates the API key for a user that is different than the user that runs the API. The caller must have authentication credentials for the user on whose behalf the API key will be created. It is not possible to use this API to create an API key without that user’s credentials. The supported user authentication credential types are: - -* username and password -* Elasticsearch access tokens -* JWTs - -The user, for whom the authentication credentials is provided, can optionally "run as" (impersonate) another user. In this case, the API key will be created on behalf of the impersonated user. 
- -This API is intended to be used by applications that need to create and manage API keys for end users, but cannot guarantee that those users have permission to create API keys on their own behalf. The API keys are created by the Elasticsearch API key service, which is automatically enabled. - -A successful grant API key API call returns a JSON structure that contains the API key, its unique id, and its name. If applicable, it also returns expiration information for the API key in milliseconds. - -By default, API keys never expire. You can specify expiration information when you create the API keys. - -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-grant-api-key) - -```ts -client.security.grantApiKey({ api_key, grant_type }) -``` - - -### Arguments [_arguments_391] - -* **Request (object):** - - * **`api_key` ({ name, expiration, role_descriptors, metadata })**: The API key. - * **`grant_type` (Enum("access_token" | "password"))**: The type of grant. Supported grant types are: `access_token`, `password`. - * **`access_token` (Optional, string)**: The user’s access token. If you specify the `access_token` grant type, this parameter is required. It is not valid with other grant types. - * **`username` (Optional, string)**: The user name that identifies the user. If you specify the `password` grant type, this parameter is required. It is not valid with other grant types. - * **`password` (Optional, string)**: The user’s password. If you specify the `password` grant type, this parameter is required. It is not valid with other grant types. - * **`run_as` (Optional, string)**: The name of the user to be impersonated. - - - -### has_privileges [_has_privileges] - -Check user privileges. - -Determine whether the specified user has a specified list of privileges. All users can use this API, but only to determine their own privileges. To check the privileges of other users, you must use the run as feature. - -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-has-privileges) - -```ts -client.security.hasPrivileges({ ...
}) -``` - - -### Arguments [_arguments_392] - -* **Request (object):** - - * **`user` (Optional, string)**: Username - * **`application` (Optional, { application, privileges, resources }[])** - * **`cluster` (Optional, Enum("all" | "cancel_task" | "create_snapshot" | "cross_cluster_replication" | "cross_cluster_search" | "delegate_pki" | "grant_api_key" | "manage" | "manage_api_key" | "manage_autoscaling" | "manage_behavioral_analytics" | "manage_ccr" | "manage_data_frame_transforms" | "manage_data_stream_global_retention" | "manage_enrich" | "manage_ilm" | "manage_index_templates" | "manage_inference" | "manage_ingest_pipelines" | "manage_logstash_pipelines" | "manage_ml" | "manage_oidc" | "manage_own_api_key" | "manage_pipeline" | "manage_rollup" | "manage_saml" | "manage_search_application" | "manage_search_query_rules" | "manage_search_synonyms" | "manage_security" | "manage_service_account" | "manage_slm" | "manage_token" | "manage_transform" | "manage_user_profile" | "manage_watcher" | "monitor" | "monitor_data_frame_transforms" | "monitor_data_stream_global_retention" | "monitor_enrich" | "monitor_inference" | "monitor_ml" | "monitor_rollup" | "monitor_snapshot" | "monitor_stats" | "monitor_text_structure" | "monitor_transform" | "monitor_watcher" | "none" | "post_behavioral_analytics_event" | "read_ccr" | "read_fleet_secrets" | "read_ilm" | "read_pipeline" | "read_security" | "read_slm" | "transport_client" | "write_connector_secrets" | "write_fleet_secrets")[])**: A list of the cluster privileges that you want to check. - * **`index` (Optional, { names, privileges, allow_restricted_indices }[])** - - - -### has_privileges_user_profile [_has_privileges_user_profile] - -Check user profile privileges. - -Determine whether the users associated with the specified user profile IDs have all the requested privileges. - -::::{note} -The user profile feature is designed only for use by Kibana and Elastic’s Observability, Search and Elastic Security solutions. Individual users and external applications should not call this API directly. Elastic reserves the right to change or remove this feature in future releases without prior notice. -:::: - - -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-has-privileges-user-profile) - -```ts -client.security.hasPrivilegesUserProfile({ uids, privileges }) -``` - - -### Arguments [_arguments_393] - -* **Request (object):** - - * **`uids` (string[])**: A list of profile IDs. The privileges are checked for associated users of the profiles. - * **`privileges` ({ application, cluster, index })**: An object containing all the privileges to be checked. - - - -### invalidate_api_key [_invalidate_api_key] - -Invalidate API keys. - -This API invalidates API keys created by the create API key or grant API key APIs. Invalidated API keys fail authentication, but they can still be viewed using the get API key information and query API key information APIs, for at least the configured retention period, until they are automatically deleted. - -To use this API, you must have at least the `manage_security`, `manage_api_key`, or `manage_own_api_key` cluster privileges. The `manage_security` privilege allows deleting any API key, including both REST and cross cluster API keys. The `manage_api_key` privilege allows deleting any REST API key, but not cross cluster API keys. The `manage_own_api_key` only allows deleting REST API keys that are owned by the user. 
In addition, with the `manage_own_api_key` privilege, an invalidation request must be issued in one of the three formats: - -* Set the parameter `owner=true`. -* Or, set both `username` and `realm_name` to match the user’s identity. -* Or, if the request is issued by an API key, that is to say an API key invalidates itself, specify its ID in the `ids` field. - -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-invalidate-api-key) - -```ts -client.security.invalidateApiKey({ ... }) -``` - - -### Arguments [_arguments_394] - -* **Request (object):** - - * **`id` (Optional, string)** - * **`ids` (Optional, string[])**: A list of API key ids. This parameter cannot be used with any of `name`, `realm_name`, or `username`. - * **`name` (Optional, string)**: An API key name. This parameter cannot be used with any of `ids`, `realm_name` or `username`. - * **`owner` (Optional, boolean)**: Query API keys owned by the currently authenticated user. The `realm_name` or `username` parameters cannot be specified when this parameter is set to `true` as they are assumed to be the currently authenticated ones. - - -::::{note} -At least one of `ids`, `name`, `username`, and `realm_name` must be specified if `owner` is `false`. ** *`realm_name` (Optional, string)**: The name of an authentication realm. This parameter cannot be used with either `ids` or `name`, or when `owner` flag is set to `true`. ** *`username` (Optional, string)**: The username of a user. This parameter cannot be used with either `ids` or `name` or when `owner` flag is set to `true`. -:::: - - - -### invalidate_token [_invalidate_token] - -Invalidate a token. - -The access tokens returned by the get token API have a finite period of time for which they are valid. After that time period, they can no longer be used. The time period is defined by the `xpack.security.authc.token.timeout` setting. - -The refresh tokens returned by the get token API are only valid for 24 hours. They can also be used exactly once. If you want to invalidate one or more access or refresh tokens immediately, use this invalidate token API. - -::::{note} -While all parameters are optional, at least one of them is required. More specifically, either one of `token` or `refresh_token` parameters is required. If none of these two are specified, then `realm_name` and/or `username` need to be specified. -:::: - - -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-invalidate-token) - -```ts -client.security.invalidateToken({ ... }) -``` - - -### Arguments [_arguments_395] - -* **Request (object):** - - * **`token` (Optional, string)**: An access token. This parameter cannot be used if any of `refresh_token`, `realm_name`, or `username` are used. - * **`refresh_token` (Optional, string)**: A refresh token. This parameter cannot be used if any of `refresh_token`, `realm_name`, or `username` are used. - * **`realm_name` (Optional, string)**: The name of an authentication realm. This parameter cannot be used with either `refresh_token` or `token`. - * **`username` (Optional, string)**: The username of a user. This parameter cannot be used with either `refresh_token` or `token`. - - - -### oidc_authenticate [_oidc_authenticate] - -Authenticate OpenID Connect. - -Exchange an OpenID Connect authentication response message for an Elasticsearch internal access token and refresh token that can be subsequently used for authentication. 
- -Elasticsearch exposes all the necessary OpenID Connect related functionality with the OpenID Connect APIs. These APIs are used internally by Kibana in order to provide OpenID Connect based authentication, but can also be used by other, custom web applications or other clients. - -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-oidc-authenticate) - -```ts -client.security.oidcAuthenticate({ nonce, redirect_uri, state }) -``` - - -### Arguments [_arguments_396] - -* **Request (object):** - - * **`nonce` (string)**: Associate a client session with an ID token and mitigate replay attacks. This value needs to be the same as the one that was provided to the `/_security/oidc/prepare` API or the one that was generated by Elasticsearch and included in the response to that call. - * **`redirect_uri` (string)**: The URL to which the OpenID Connect Provider redirected the User Agent in response to an authentication request after a successful authentication. This URL must be provided as-is (URL encoded), taken from the body of the response or as the value of a location header in the response from the OpenID Connect Provider. - * **`state` (string)**: Maintain state between the authentication request and the response. This value needs to be the same as the one that was provided to the `/_security/oidc/prepare` API or the one that was generated by Elasticsearch and included in the response to that call. - * **`realm` (Optional, string)**: The name of the OpenID Connect realm. This property is useful in cases where multiple realms are defined. - - - -### oidc_logout [_oidc_logout] - -Logout of OpenID Connect. - -Invalidate an access token and a refresh token that were generated as a response to the `/_security/oidc/authenticate` API. - -If the OpenID Connect authentication realm in Elasticsearch is accordingly configured, the response to this call will contain a URI pointing to the end session endpoint of the OpenID Connect Provider in order to perform single logout. - -Elasticsearch exposes all the necessary OpenID Connect related functionality with the OpenID Connect APIs. These APIs are used internally by Kibana in order to provide OpenID Connect based authentication, but can also be used by other, custom web applications or other clients. - -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-oidc-logout) - -```ts -client.security.oidcLogout({ access_token }) -``` - - -### Arguments [_arguments_397] - -* **Request (object):** - - * **`access_token` (string)**: The access token to be invalidated. - * **`refresh_token` (Optional, string)**: The refresh token to be invalidated. - - - -### oidc_prepare_authentication [_oidc_prepare_authentication] - -Prepare OpenID connect authentication. - -Create an oAuth 2.0 authentication request as a URL string based on the configuration of the OpenID Connect authentication realm in Elasticsearch. - -The response of this API is a URL pointing to the Authorization Endpoint of the configured OpenID Connect Provider, which can be used to redirect the browser of the user in order to continue the authentication process. - -Elasticsearch exposes all the necessary OpenID Connect related functionality with the OpenID Connect APIs. These APIs are used internally by Kibana in order to provide OpenID Connect based authentication, but can also be used by other, custom web applications or other clients. 
- -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-oidc-prepare-authentication) - -```ts -client.security.oidcPrepareAuthentication({ ... }) -``` - - -### Arguments [_arguments_398] - -* **Request (object):** - - * **`iss` (Optional, string)**: In the case of a third party initiated single sign on, this is the issuer identifier for the OP that the RP is to send the authentication request to. It cannot be specified when **realm** is specified. One of **realm** or **iss** is required. - * **`login_hint` (Optional, string)**: In the case of a third party initiated single sign on, it is a string value that is included in the authentication request as the **login_hint** parameter. This parameter is not valid when **realm** is specified. - * **`nonce` (Optional, string)**: The value used to associate a client session with an ID token and to mitigate replay attacks. If the caller of the API does not provide a value, Elasticsearch will generate one with sufficient entropy and return it in the response. - * **`realm` (Optional, string)**: The name of the OpenID Connect realm in Elasticsearch the configuration of which should be used in order to generate the authentication request. It cannot be specified when **iss** is specified. One of **realm** or **iss** is required. - * **`state` (Optional, string)**: The value used to maintain state between the authentication request and the response, typically used as a Cross-Site Request Forgery mitigation. If the caller of the API does not provide a value, Elasticsearch will generate one with sufficient entropy and return it in the response. - - - -### put_privileges [_put_privileges] - -Create or update application privileges. - -To use this API, you must have one of the following privileges: - -* The `manage_security` cluster privilege (or a greater privilege such as `all`). -* The "Manage Application Privileges" global privilege for the application being referenced in the request. - -Application names are formed from a prefix, with an optional suffix that conform to the following rules: - -* The prefix must begin with a lowercase ASCII letter. -* The prefix must contain only ASCII letters or digits. -* The prefix must be at least 3 characters long. -* If the suffix exists, it must begin with either a dash `-` or `_`. -* The suffix cannot contain any of the following characters: `\`, `/`, `*`, `?`, `"`, `<`, `>`, `|`, `,`, `*`. -* No part of the name can contain whitespace. - -Privilege names must begin with a lowercase ASCII letter and must contain only ASCII letters and digits along with the characters `_`, `-`, and `.`. - -Action names can contain any number of printable ASCII characters and must contain at least one of the following characters: `/`, `*`, `:`. - -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-put-privileges) - -```ts -client.security.putPrivileges({ ... }) -``` - - -### Arguments [_arguments_399] - -* **Request (object):** - - * **`privileges` (Optional, Record>)** - * **`refresh` (Optional, Enum(true | false | "wait_for"))**: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. - - - -### put_role [_put_role] - -Create or update roles. 
- -The role management APIs are generally the preferred way to manage roles in the native realm, rather than using file-based role management. The create or update roles API cannot update roles that are defined in roles files. File-based role management is not available in Elastic Serverless. - -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-put-role) - -```ts -client.security.putRole({ name }) -``` - - -### Arguments [_arguments_400] - -* **Request (object):** - - * **`name` (string)**: The name of the role that is being created or updated. On Elasticsearch Serverless, the role name must begin with a letter or digit and can only contain letters, digits and the characters *_*, *-*, and *.*. Each role must have a unique name, as this will serve as the identifier for that role. - * **`applications` (Optional, { application, privileges, resources }[])**: A list of application privilege entries. - * **`cluster` (Optional, Enum("all" | "cancel_task" | "create_snapshot" | "cross_cluster_replication" | "cross_cluster_search" | "delegate_pki" | "grant_api_key" | "manage" | "manage_api_key" | "manage_autoscaling" | "manage_behavioral_analytics" | "manage_ccr" | "manage_data_frame_transforms" | "manage_data_stream_global_retention" | "manage_enrich" | "manage_ilm" | "manage_index_templates" | "manage_inference" | "manage_ingest_pipelines" | "manage_logstash_pipelines" | "manage_ml" | "manage_oidc" | "manage_own_api_key" | "manage_pipeline" | "manage_rollup" | "manage_saml" | "manage_search_application" | "manage_search_query_rules" | "manage_search_synonyms" | "manage_security" | "manage_service_account" | "manage_slm" | "manage_token" | "manage_transform" | "manage_user_profile" | "manage_watcher" | "monitor" | "monitor_data_frame_transforms" | "monitor_data_stream_global_retention" | "monitor_enrich" | "monitor_inference" | "monitor_ml" | "monitor_rollup" | "monitor_snapshot" | "monitor_stats" | "monitor_text_structure" | "monitor_transform" | "monitor_watcher" | "none" | "post_behavioral_analytics_event" | "read_ccr" | "read_fleet_secrets" | "read_ilm" | "read_pipeline" | "read_security" | "read_slm" | "transport_client" | "write_connector_secrets" | "write_fleet_secrets")[])**: A list of cluster privileges. These privileges define the cluster-level actions for users with this role. - * **`global` (Optional, Record)**: An object defining global privileges. A global privilege is a form of cluster privilege that is request-aware. Support for global privileges is currently limited to the management of application privileges. - * **`indices` (Optional, { field_security, names, privileges, query, allow_restricted_indices }[])**: A list of indices permissions entries. - * **`remote_indices` (Optional, { clusters, field_security, names, privileges, query, allow_restricted_indices }[])**: A list of remote indices permissions entries. - - -::::{note} -Remote indices are effective for remote clusters configured with the API key based model. They have no effect for remote clusters configured with the certificate based model. ** *`remote_cluster` (Optional, { clusters, privileges }[])**: A list of remote cluster permissions entries. *** *`metadata` (Optional, Record)**: Optional metadata. Within the metadata object, keys that begin with an underscore (`_`) are reserved for system use. *** *`run_as` (Optional, string[])**: A list of users that the owners of this role can impersonate. **Note**: in Serverless, the run-as feature is disabled. 
For API compatibility, you can still specify an empty `run_as` field, but a non-empty list will be rejected. *** *`description` (Optional, string)**: Optional description of the role descriptor *** *`transient_metadata` (Optional, Record)**: Indicates roles that might be incompatible with the current cluster license, specifically roles with document and field level security. When the cluster license doesn’t allow certain features for a given role, this parameter is updated dynamically to list the incompatible features. If `enabled` is `false`, the role is ignored, but is still listed in the response from the authenticate API. ** *`refresh` (Optional, Enum(true | false | "wait_for"))**: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. -:::: - - - -### put_role_mapping [_put_role_mapping] - -Create or update role mappings. - -Role mappings define which roles are assigned to each user. Each mapping has rules that identify users and a list of roles that are granted to those users. The role mapping APIs are generally the preferred way to manage role mappings rather than using role mapping files. The create or update role mappings API cannot update role mappings that are defined in role mapping files. - -::::{note} -This API does not create roles. Rather, it maps users to existing roles. Roles can be created by using the create or update roles API or roles files. -:::: - - -**Role templates** - -The most common use for role mappings is to create a mapping from a known value on the user to a fixed role name. For example, all users in the `cn=admin,dc=example,dc=com` LDAP group should be given the superuser role in Elasticsearch. The `roles` field is used for this purpose. - -For more complex needs, it is possible to use Mustache templates to dynamically determine the names of the roles that should be granted to the user. The `role_templates` field is used for this purpose. - -::::{note} -To use role templates successfully, the relevant scripting feature must be enabled. Otherwise, all attempts to create a role mapping with role templates fail. -:::: - - -All of the user fields that are available in the role mapping rules are also available in the role templates. Thus it is possible to assign a user to a role that reflects their username, their groups, or the name of the realm to which they authenticated. - -By default a template is evaluated to produce a single string that is the name of the role which should be assigned to the user. If the format of the template is set to "json" then the template is expected to produce a JSON string or an array of JSON strings for the role names. - -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-put-role-mapping) - -```ts -client.security.putRoleMapping({ name }) -``` - - -### Arguments [_arguments_401] - -* **Request (object):** - - * **`name` (string)**: The distinct name that identifies the role mapping. The name is used solely as an identifier to facilitate interaction via the API; it does not affect the behavior of the mapping in any way. - * **`enabled` (Optional, boolean)**: Mappings that have `enabled` set to `false` are ignored when role mapping is performed. - * **`metadata` (Optional, Record)**: Additional metadata that helps define which roles are assigned to each user. 
Within the metadata object, keys beginning with `_` are reserved for system usage. - * **`roles` (Optional, string[])**: A list of role names that are granted to the users that match the role mapping rules. Exactly one of `roles` or `role_templates` must be specified. - * **`role_templates` (Optional, { format, template }[])**: A list of Mustache templates that will be evaluated to determine the roles names that should granted to the users that match the role mapping rules. Exactly one of `roles` or `role_templates` must be specified. - * **`rules` (Optional, { any, all, field, except })**: The rules that determine which users should be matched by the mapping. A rule is a logical condition that is expressed by using a JSON DSL. - * **`run_as` (Optional, string[])** - * **`refresh` (Optional, Enum(true | false | "wait_for"))**: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. - - - -### put_user [_put_user] - -Create or update users. - -Add and update users in the native realm. A password is required for adding a new user but is optional when updating an existing user. To change a user’s password without updating any other fields, use the change password API. - -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-put-user) - -```ts -client.security.putUser({ username }) -``` - - -### Arguments [_arguments_402] - -* **Request (object):** - - * **`username` (string)**: An identifier for the user. - - -::::{note} -Usernames must be at least 1 and no more than 507 characters. They can contain alphanumeric characters (a-z, A-Z, 0-9), spaces, punctuation, and printable symbols in the Basic Latin (ASCII) block. Leading or trailing whitespace is not allowed. ** *`email` (Optional, string | null)**: The email of the user. *** *`full_name` (Optional, string | null)**: The full name of the user. *** *`metadata` (Optional, Record)**: Arbitrary metadata that you want to associate with the user. *** *`password` (Optional, string)**: The user’s password. Passwords must be at least 6 characters long. When adding a user, one of `password` or `password_hash` is required. When updating an existing user, the password is optional, so that other fields on the user (such as their roles) may be updated without modifying the user’s password *** *`password_hash` (Optional, string)**: A hash of the user’s password. This must be produced using the same hashing algorithm as has been configured for password storage. For more details, see the explanation of the `xpack.security.authc.password_hashing.algorithm` setting in the user cache and password hash algorithm documentation. Using this parameter allows the client to pre-hash the password for performance and/or confidentiality reasons. The `password` parameter and the `password_hash` parameter cannot be used in the same request. *** *`roles` (Optional, string[])**: A set of roles the user has. The roles determine the user’s access permissions. To create a user without any roles, specify an empty list (`[]`). *** *`enabled` (Optional, boolean)**: Specifies whether the user is enabled. ** *`refresh` (Optional, Enum(true | false | "wait_for"))**: Valid values are `true`, `false`, and `wait_for`. These values have the same meaning as in the index API, but the default value for this API is true. 
-:::: - - - -### query_api_keys [_query_api_keys] - -Find API keys with a query. - -Get a paginated list of API keys and their information. You can optionally filter the results with a query. - -To use this API, you must have at least the `manage_own_api_key` or the `read_security` cluster privileges. If you have only the `manage_own_api_key` privilege, this API returns only the API keys that you own. If you have the `read_security`, `manage_api_key`, or greater privileges (including `manage_security`), this API returns all API keys regardless of ownership. - -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-query-api-keys) - -```ts -client.security.queryApiKeys({ ... }) -``` - - -### Arguments [_arguments_403] - -* **Request (object):** - - * **`aggregations` (Optional, Record)**: Any aggregations to run over the corpus of returned API keys. Aggregations and queries work together. Aggregations are computed only on the API keys that match the query. This supports only a subset of aggregation types, namely: `terms`, `range`, `date_range`, `missing`, `cardinality`, `value_count`, `composite`, `filter`, and `filters`. Additionally, aggregations only run over the same subset of fields that query works with. - * **`query` (Optional, { bool, exists, ids, match, match_all, prefix, range, simple_query_string, term, terms, wildcard })**: A query to filter which API keys to return. If the query parameter is missing, it is equivalent to a `match_all` query. The query supports a subset of query types, including `match_all`, `bool`, `term`, `terms`, `match`, `ids`, `prefix`, `wildcard`, `exists`, `range`, and `simple_query_string`. You can query the following public information associated with an API key: `id`, `type`, `name`, `creation`, `expiration`, `invalidated`, `invalidation`, `username`, `realm`, and `metadata`. - - -::::{note} -The queryable string values associated with API keys are internally mapped as keywords. Consequently, if no `analyzer` parameter is specified for a `match` query, then the provided match query string is interpreted as a single keyword value. Such a match query is hence equivalent to a `term` query. ** *`from` (Optional, number)**: The starting document offset. It must not be negative. By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters. To page through more hits, use the `search_after` parameter. *** *`sort` (Optional, string | { _score, _doc, _geo_distance, _script } | string | { _score, _doc, _geo_distance, _script }[])**: The sort definition. Other than `id`, all public fields of an API key are eligible for sorting. In addition, sort can also be applied to the `_doc` field to sort by index order. *** *`size` (Optional, number)**: The number of hits to return. It must not be negative. The `size` parameter can be set to `0`, in which case no API key matches are returned, only the aggregation results. By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters. To page through more hits, use the `search_after` parameter. *** *`search_after` (Optional, number | number | string | boolean | null | User-defined value[])**: The search after definition. *** *`with_limited_by` (Optional, boolean)**: Return the snapshot of the owner user’s role descriptors associated with the API key. An API key’s actual permission is the intersection of its assigned role descriptors and the owner user’s role descriptors (effectively limited by it). 
An API key cannot retrieve any API key’s limited-by role descriptors (including itself) unless it has `manage_api_key` or higher privileges. *** *`with_profile_uid` (Optional, boolean)**: Determines whether to also retrieve the profile UID for the API key owner principal. If it exists, the profile UID is returned under the `profile_uid` response field for each API key. ** *`typed_keys` (Optional, boolean)**: Determines whether aggregation names are prefixed by their respective types in the response. -:::: - - - -### query_role [_query_role] - -Find roles with a query. - -Get roles in a paginated manner. The role management APIs are generally the preferred way to manage roles, rather than using file-based role management. The query roles API does not retrieve roles that are defined in roles files, nor built-in ones. You can optionally filter the results with a query. Also, the results can be paginated and sorted. - -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-query-role) - -```ts -client.security.queryRole({ ... }) -``` - - -### Arguments [_arguments_404] - -* **Request (object):** - - * **`query` (Optional, { bool, exists, ids, match, match_all, prefix, range, simple_query_string, term, terms, wildcard })**: A query to filter which roles to return. If the query parameter is missing, it is equivalent to a `match_all` query. The query supports a subset of query types, including `match_all`, `bool`, `term`, `terms`, `match`, `ids`, `prefix`, `wildcard`, `exists`, `range`, and `simple_query_string`. You can query the following information associated with roles: `name`, `description`, `metadata`, `applications.application`, `applications.privileges`, and `applications.resources`. - * **`from` (Optional, number)**: The starting document offset. It must not be negative. By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters. To page through more hits, use the `search_after` parameter. - * **`sort` (Optional, string | { _score, _doc, _geo_distance, _script } | string | { _score, _doc, _geo_distance, _script }[])**: The sort definition. You can sort on `username`, `roles`, or `enabled`. In addition, sort can also be applied to the `_doc` field to sort by index order. - * **`size` (Optional, number)**: The number of hits to return. It must not be negative. By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters. To page through more hits, use the `search_after` parameter. - * **`search_after` (Optional, number | number | string | boolean | null | User-defined value[])**: The search after definition. - - - -### query_user [_query_user] - -Find users with a query. - -Get information for users in a paginated manner. You can optionally filter the results with a query. - -::::{note} -As opposed to the get user API, built-in users are excluded from the result. This API is only for native users. -:::: - - -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-query-user) - -```ts -client.security.queryUser({ ... }) -``` - - -### Arguments [_arguments_405] - -* **Request (object):** - - * **`query` (Optional, { ids, bool, exists, match, match_all, prefix, range, simple_query_string, term, terms, wildcard })**: A query to filter which users to return. If the query parameter is missing, it is equivalent to a `match_all` query. 
The query supports a subset of query types, including `match_all`, `bool`, `term`, `terms`, `match`, `ids`, `prefix`, `wildcard`, `exists`, `range`, and `simple_query_string`. You can query the following information associated with user: `username`, `roles`, `enabled`, `full_name`, and `email`. - * **`from` (Optional, number)**: The starting document offset. It must not be negative. By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters. To page through more hits, use the `search_after` parameter. - * **`sort` (Optional, string | { _score, _doc, _geo_distance, _script } | string | { _score, _doc, _geo_distance, _script }[])**: The sort definition. Fields eligible for sorting are: `username`, `roles`, `enabled`. In addition, sort can also be applied to the `_doc` field to sort by index order. - * **`size` (Optional, number)**: The number of hits to return. It must not be negative. By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters. To page through more hits, use the `search_after` parameter. - * **`search_after` (Optional, number | number | string | boolean | null | User-defined value[])**: The search after definition - * **`with_profile_uid` (Optional, boolean)**: Determines whether to retrieve the user profile UID, if it exists, for the users. - - - -### saml_authenticate [_saml_authenticate] - -Authenticate SAML. - -Submit a SAML response message to Elasticsearch for consumption. - -::::{note} -This API is intended for use by custom web applications other than Kibana. If you are using Kibana, refer to the documentation for configuring SAML single-sign-on on the Elastic Stack. -:::: - - -The SAML message that is submitted can be: - -* A response to a SAML authentication request that was previously created using the SAML prepare authentication API. -* An unsolicited SAML message in the case of an IdP-initiated single sign-on (SSO) flow. - -In either case, the SAML message needs to be a base64 encoded XML document with a root element of ``. - -After successful validation, Elasticsearch responds with an Elasticsearch internal access token and refresh token that can be subsequently used for authentication. This API endpoint essentially exchanges SAML responses that indicate successful authentication in the IdP for Elasticsearch access and refresh tokens, which can be used for authentication against Elasticsearch. - -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-saml-authenticate) - -```ts -client.security.samlAuthenticate({ content, ids }) -``` - - -### Arguments [_arguments_406] - -* **Request (object):** - - * **`content` (string)**: The SAML response as it was sent by the user’s browser, usually a Base64 encoded XML document. - * **`ids` (string | string[])**: A JSON array with all the valid SAML Request Ids that the caller of the API has for the current user. - * **`realm` (Optional, string)**: The name of the realm that should authenticate the SAML response. Useful in cases where many SAML realms are defined. - - - -### saml_complete_logout [_saml_complete_logout] - -Logout of SAML completely. - -Verifies the logout response sent from the SAML IdP. - -::::{note} -This API is intended for use by custom web applications other than Kibana. If you are using Kibana, refer to the documentation for configuring SAML single-sign-on on the Elastic Stack. 
-:::: - - -The SAML IdP may send a logout response back to the SP after handling the SP-initiated SAML Single Logout. This API verifies the response by ensuring the content is relevant and validating its signature. An empty response is returned if the verification process is successful. The response can be sent by the IdP with either the HTTP-Redirect or the HTTP-Post binding. The caller of this API must prepare the request accordingly so that this API can handle either of them. - -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-saml-complete-logout) - -```ts -client.security.samlCompleteLogout({ realm, ids }) -``` - - -### Arguments [_arguments_407] - -* **Request (object):** - - * **`realm` (string)**: The name of the SAML realm in Elasticsearch for which the configuration is used to verify the logout response. - * **`ids` (string | string[])**: A JSON array with all the valid SAML Request Ids that the caller of the API has for the current user. - * **`query_string` (Optional, string)**: If the SAML IdP sends the logout response with the HTTP-Redirect binding, this field must be set to the query string of the redirect URI. - * **`content` (Optional, string)**: If the SAML IdP sends the logout response with the HTTP-Post binding, this field must be set to the value of the SAMLResponse form parameter from the logout response. - - - -### saml_invalidate [_saml_invalidate] - -Invalidate SAML. - -Submit a SAML LogoutRequest message to Elasticsearch for consumption. - -::::{note} -This API is intended for use by custom web applications other than Kibana. If you are using Kibana, refer to the documentation for configuring SAML single-sign-on on the Elastic Stack. -:::: - - -The logout request comes from the SAML IdP during an IdP initiated Single Logout. The custom web application can use this API to have Elasticsearch process the `LogoutRequest`. After successful validation of the request, Elasticsearch invalidates the access token and refresh token that corresponds to that specific SAML principal and provides a URL that contains a SAML LogoutResponse message. Thus the user can be redirected back to their IdP. - -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-saml-invalidate) - -```ts -client.security.samlInvalidate({ query_string }) -``` - - -### Arguments [_arguments_408] - -* **Request (object):** - - * **`query_string` (string)**: The query part of the URL that the user was redirected to by the SAML IdP to initiate the Single Logout. This query should include a single parameter named `SAMLRequest` that contains a SAML logout request that is deflated and Base64 encoded. If the SAML IdP has signed the logout request, the URL should include two extra parameters named `SigAlg` and `Signature` that contain the algorithm used for the signature and the signature value itself. In order for Elasticsearch to be able to verify the IdP’s signature, the value of the `query_string` field must be an exact match to the string provided by the browser. The client application must not attempt to parse or process the string in any way. - * **`acs` (Optional, string)**: The Assertion Consumer Service URL that matches the one of the SAML realm in Elasticsearch that should be used. You must specify either this parameter or the `realm` parameter. - * **`realm` (Optional, string)**: The name of the SAML realm in Elasticsearch the configuration. 
You must specify either this parameter or the `acs` parameter. - - - -### saml_logout [_saml_logout] - -Logout of SAML. - -Submits a request to invalidate an access token and refresh token. - -::::{note} -This API is intended for use by custom web applications other than Kibana. If you are using Kibana, refer to the documentation for configuring SAML single-sign-on on the Elastic Stack. -:::: - - -This API invalidates the tokens that were generated for a user by the SAML authenticate API. If the SAML realm in Elasticsearch is configured accordingly and the SAML IdP supports this, the Elasticsearch response contains a URL to redirect the user to the IdP that contains a SAML logout request (starting an SP-initiated SAML Single Logout). - -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-saml-logout) - -```ts -client.security.samlLogout({ token }) -``` - - -### Arguments [_arguments_409] - -* **Request (object):** - - * **`token` (string)**: The access token that was returned as a response to calling the SAML authenticate API. Alternatively, the most recent token that was received after refreshing the original one by using a `refresh_token`. - * **`refresh_token` (Optional, string)**: The refresh token that was returned as a response to calling the SAML authenticate API. Alternatively, the most recent refresh token that was received after refreshing the original access token. - - - -### saml_prepare_authentication [_saml_prepare_authentication] - -Prepare SAML authentication. - -Create a SAML authentication request (``) as a URL string based on the configuration of the respective SAML realm in Elasticsearch. - -::::{note} -This API is intended for use by custom web applications other than Kibana. If you are using Kibana, refer to the documentation for configuring SAML single-sign-on on the Elastic Stack. -:::: - - -This API returns a URL pointing to the SAML Identity Provider. You can use the URL to redirect the browser of the user in order to continue the authentication process. The URL includes a single parameter named `SAMLRequest`, which contains a SAML Authentication request that is deflated and Base64 encoded. If the configuration dictates that SAML authentication requests should be signed, the URL has two extra parameters named `SigAlg` and `Signature`. These parameters contain the algorithm used for the signature and the signature value itself. It also returns a random string that uniquely identifies this SAML Authentication request. The caller of this API needs to store this identifier as it needs to be used in a following step of the authentication process. - -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-saml-prepare-authentication) - -```ts -client.security.samlPrepareAuthentication({ ... }) -``` - - -### Arguments [_arguments_410] - -* **Request (object):** - - * **`acs` (Optional, string)**: The Assertion Consumer Service URL that matches the one of the SAML realms in Elasticsearch. The realm is used to generate the authentication request. You must specify either this parameter or the `realm` parameter. - * **`realm` (Optional, string)**: The name of the SAML realm in Elasticsearch for which the configuration is used to generate the authentication request. You must specify either this parameter or the `acs` parameter. - * **`relay_state` (Optional, string)**: A string that will be included in the redirect URL that this API returns as the `RelayState` query parameter. 
If the Authentication Request is signed, this value is used as part of the signature computation. - - - -### saml_service_provider_metadata [_saml_service_provider_metadata] - -Create SAML service provider metadata. - -Generate SAML metadata for a SAML 2.0 Service Provider. - -The SAML 2.0 specification provides a mechanism for Service Providers to describe their capabilities and configuration using a metadata file. This API generates Service Provider metadata based on the configuration of a SAML realm in Elasticsearch. - -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-saml-service-provider-metadata) - -```ts -client.security.samlServiceProviderMetadata({ realm_name }) -``` - - -### Arguments [_arguments_411] - -* **Request (object):** - - * **`realm_name` (string)**: The name of the SAML realm in Elasticsearch. - - - -### suggest_user_profiles [_suggest_user_profiles] - -Suggest a user profile. - -Get suggestions for user profiles that match specified search criteria. - -::::{note} -The user profile feature is designed only for use by Kibana and Elastic’s Observability, Search and Elastic Security solutions. Individual users and external applications should not call this API directly. Elastic reserves the right to change or remove this feature in future releases without prior notice. -:::: - - -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-suggest-user-profiles) - -```ts -client.security.suggestUserProfiles({ ... }) -``` - - -### Arguments [_arguments_412] - -* **Request (object):** - - * **`name` (Optional, string)**: A query string used to match name-related fields in user profile documents. Name-related fields are the user’s `username`, `full_name`, and `email`. - * **`size` (Optional, number)**: The number of profiles to return. - * **`data` (Optional, string | string[])**: A list of filters for the `data` field of the profile document. To return all content use `data=*`. To return a subset of content, use `data=` to retrieve content nested under the specified ``. By default, the API returns no `data` content. It is an error to specify `data` as both the query parameter and the request body field. - * **`hint` (Optional, { uids, labels })**: Extra search criteria to improve relevance of the suggestion result. Profiles matching the spcified hint are ranked higher in the response. Profiles not matching the hint aren’t excluded from the response as long as the profile matches the `name` field query. - - - -### update_api_key [_update_api_key] - -Update an API key. - -Update attributes of an existing API key. This API supports updates to an API key’s access scope, expiration, and metadata. - -To use this API, you must have at least the `manage_own_api_key` cluster privilege. Users can only update API keys that they created or that were granted to them. To update another user’s API key, use the `run_as` feature to submit a request on behalf of another user. - -::::{important} -It’s not possible to use an API key as the authentication credential for this API. The owner user’s credentials are required. -:::: - - -Use this API to update API keys created by the create API key or grant API Key APIs. If you need to apply the same update to many API keys, you can use the bulk update API keys API to reduce overhead. It’s not possible to update expired API keys or API keys that have been invalidated by the invalidate API key API. 
- -The access scope of an API key is derived from the `role_descriptors` you specify in the request and a snapshot of the owner user’s permissions at the time of the request. The snapshot of the owner’s permissions is updated automatically on every call. - -::::{important} -If you don’t specify `role_descriptors` in the request, a call to this API might still change the API key’s access scope. This change can occur if the owner user’s permissions have changed since the API key was created or last modified. -:::: - - -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-update-api-key) - -```ts -client.security.updateApiKey({ id }) -``` - - -### Arguments [_arguments_413] - -* **Request (object):** - - * **`id` (string)**: The ID of the API key to update. - * **`role_descriptors` (Optional, Record)**: The role descriptors to assign to this API key. The API key’s effective permissions are an intersection of its assigned privileges and the point in time snapshot of permissions of the owner user. You can assign new privileges by specifying them in this parameter. To remove assigned privileges, you can supply an empty `role_descriptors` parameter, that is to say, an empty object `{}`. If an API key has no assigned privileges, it inherits the owner user’s full permissions. The snapshot of the owner’s permissions is always updated, whether you supply the `role_descriptors` parameter or not. The structure of a role descriptor is the same as the request for the create API keys API. - * **`metadata` (Optional, Record)**: Arbitrary metadata that you want to associate with the API key. It supports a nested data structure. Within the metadata object, keys beginning with `_` are reserved for system usage. When specified, this value fully replaces the metadata previously associated with the API key. - * **`expiration` (Optional, string | -1 | 0)**: The expiration time for the API key. By default, API keys never expire. This property can be omitted to leave the expiration unchanged. - - - -### update_cross_cluster_api_key [_update_cross_cluster_api_key] - -Update a cross-cluster API key. - -Update the attributes of an existing cross-cluster API key, which is used for API key based remote cluster access. - -To use this API, you must have at least the `manage_security` cluster privilege. Users can only update API keys that they created. To update another user’s API key, use the `run_as` feature to submit a request on behalf of another user. - -::::{important} -It’s not possible to use an API key as the authentication credential for this API. To update an API key, the owner user’s credentials are required. -:::: - - -It’s not possible to update expired API keys, or API keys that have been invalidated by the invalidate API key API. - -This API supports updates to an API key’s access scope, metadata, and expiration. The owner user’s information, such as the `username` and `realm`, is also updated automatically on every call. - -::::{note} -This API cannot update REST API keys, which should be updated by either the update API key or bulk update API keys API. 
-:::: - - -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-update-cross-cluster-api-key) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-security-put-role) ```ts -client.security.updateCrossClusterApiKey({ id, access }) -``` - - -### Arguments [_arguments_414] - -* **Request (object):** - - * **`id` (string)**: The ID of the cross-cluster API key to update. - * **`access` ({ replication, search })**: The access to be granted to this API key. The access is composed of permissions for cross cluster search and cross cluster replication. At least one of them must be specified. When specified, the new access assignment fully replaces the previously assigned access. - * **`expiration` (Optional, string | -1 | 0)**: The expiration time for the API key. By default, API keys never expire. This property can be omitted to leave the value unchanged. - * **`metadata` (Optional, Record)**: Arbitrary metadata that you want to associate with the API key. It supports nested data structure. Within the metadata object, keys beginning with `_` are reserved for system usage. When specified, this information fully replaces metadata previously associated with the API key. - - - -### update_settings [_update_settings] - -Update security index settings. - -Update the user-configurable settings for the security internal index (`.security` and associated indices). Only a subset of settings are allowed to be modified. This includes `index.auto_expand_replicas` and `index.number_of_replicas`. - -::::{note} -If `index.auto_expand_replicas` is set, `index.number_of_replicas` will be ignored during updates. -:::: - - -If a specific index is not in use on the system and settings are provided for it, the request will be rejected. This API does not yet support configuring the settings for indices before they are in use. - -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-update-settings) - -```ts -client.security.updateSettings({ ... }) -``` - - -### Arguments [_arguments_415] - -* **Request (object):** - - * **`security` (Optional, { index })**: Settings for the index used for most security configuration, including native realm users and roles configured with the API. - * **`security-profile` (Optional, { index })**: Settings for the index used to store profile information. - * **`security-tokens` (Optional, { index })**: Settings for the index used to store tokens. - * **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. - * **`timeout` (Optional, string | -1 | 0)**: The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. - - - -### update_user_profile_data [_update_user_profile_data] - -Update user profile data. - -Update specific data for the user profile that is associated with a unique ID. - -::::{note} -The user profile feature is designed only for use by Kibana and Elastic’s Observability, Search and Elastic Security solutions. Individual users and external applications should not call this API directly. Elastic reserves the right to change or remove this feature in future releases without prior notice. -:::: - - -To use this API, you must have one of the following privileges: - -* The `manage_user_profile` cluster privilege. 
-* The `update_profile_data` global privilege for the namespaces that are referenced in the request. - -This API updates the `labels` and `data` fields of an existing user profile document with JSON objects. New keys and their values are added to the profile document and conflicting keys are replaced by data that’s included in the request. - -For both labels and data, content is namespaced by the top-level fields. The `update_profile_data` global privilege grants privileges for updating only the allowed namespaces. - -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-update-user-profile-data) - -```ts -client.security.updateUserProfileData({ uid }) -``` - - -### Arguments [_arguments_416] - -* **Request (object):** - - * **`uid` (string)**: A unique identifier for the user profile. - * **`labels` (Optional, Record)**: Searchable data that you want to associate with the user profile. This field supports a nested data structure. Within the labels object, top-level keys cannot begin with an underscore (`_`) or contain a period (`.`). - * **`data` (Optional, Record)**: Non-searchable data that you want to associate with the user profile. This field supports a nested data structure. Within the `data` object, top-level keys cannot begin with an underscore (`_`) or contain a period (`.`). The data object is not searchable, but can be retrieved with the get user profile API. - * **`if_seq_no` (Optional, number)**: Only perform the operation if the document has this sequence number. - * **`if_primary_term` (Optional, number)**: Only perform the operation if the document has this primary term. - * **`refresh` (Optional, Enum(true | false | "wait_for"))**: If *true*, Elasticsearch refreshes the affected shards to make this operation visible to search. If *wait_for*, it waits for a refresh to make this operation visible to search. If *false*, nothing is done with refreshes. - - - -## shutdown [_shutdown] - - -### delete_node [_delete_node] - -Cancel node shutdown preparations. Remove a node from the shutdown list so it can resume normal operations. You must explicitly clear the shutdown request when a node rejoins the cluster or when a node has permanently left the cluster. Shutdown requests are never removed automatically by Elasticsearch. - -::::{note} -This feature is designed for indirect use by Elastic Cloud, Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not supported. -:::: - - -If the operator privileges feature is enabled, you must be an operator to use this API. - -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-shutdown-delete-node) - -```ts -client.shutdown.deleteNode({ node_id }) +client.security.putRole({ name }) ``` +### Arguments [_arguments_security.put_role] -### Arguments [_arguments_417] +#### Request (object) [_request_security.put_role] +- **`name` (string)**: The name of the role that is being created or updated. On Elasticsearch Serverless, the role name must begin with a letter or digit and can only contain letters, digits and the characters '_', '-', and '.'. Each role must have a unique name, as this will serve as the identifier for that role. +- **`applications` (Optional, { application, privileges, resources }[])**: A list of application privilege entries. 
+- **`cluster` (Optional, Enum("all" | "cancel_task" | "create_snapshot" | "cross_cluster_replication" | "cross_cluster_search" | "delegate_pki" | "grant_api_key" | "manage" | "manage_api_key" | "manage_autoscaling" | "manage_behavioral_analytics" | "manage_ccr" | "manage_data_frame_transforms" | "manage_data_stream_global_retention" | "manage_enrich" | "manage_ilm" | "manage_index_templates" | "manage_inference" | "manage_ingest_pipelines" | "manage_logstash_pipelines" | "manage_ml" | "manage_oidc" | "manage_own_api_key" | "manage_pipeline" | "manage_rollup" | "manage_saml" | "manage_search_application" | "manage_search_query_rules" | "manage_search_synonyms" | "manage_security" | "manage_service_account" | "manage_slm" | "manage_token" | "manage_transform" | "manage_user_profile" | "manage_watcher" | "monitor" | "monitor_data_frame_transforms" | "monitor_data_stream_global_retention" | "monitor_enrich" | "monitor_inference" | "monitor_ml" | "monitor_rollup" | "monitor_snapshot" | "monitor_stats" | "monitor_text_structure" | "monitor_transform" | "monitor_watcher" | "none" | "post_behavioral_analytics_event" | "read_ccr" | "read_fleet_secrets" | "read_ilm" | "read_pipeline" | "read_security" | "read_slm" | "transport_client" | "write_connector_secrets" | "write_fleet_secrets")[])**: A list of cluster privileges. These privileges define the cluster-level actions for users with this role. +- **`global` (Optional, Record)**: An object defining global privileges. A global privilege is a form of cluster privilege that is request-aware. Support for global privileges is currently limited to the management of application privileges. +- **`indices` (Optional, { field_security, names, privileges, query, allow_restricted_indices }[])**: A list of indices permissions entries. +- **`remote_indices` (Optional, { clusters, field_security, names, privileges, query, allow_restricted_indices }[])**: A list of remote indices permissions entries. -* **Request (object):** +NOTE: Remote indices are effective for remote clusters configured with the API key based model. +They have no effect for remote clusters configured with the certificate based model. +- **`remote_cluster` (Optional, { clusters, privileges }[])**: A list of remote cluster permissions entries. +- **`metadata` (Optional, Record)**: Optional metadata. Within the metadata object, keys that begin with an underscore (`_`) are reserved for system use. +- **`run_as` (Optional, string[])**: A list of users that the owners of this role can impersonate. *Note*: in Serverless, the run-as feature is disabled. For API compatibility, you can still specify an empty `run_as` field, but a non-empty list will be rejected. +- **`description` (Optional, string)**: Optional description of the role descriptor +- **`transient_metadata` (Optional, Record)**: Indicates roles that might be incompatible with the current cluster license, specifically roles with document and field level security. When the cluster license doesn’t allow certain features for a given role, this parameter is updated dynamically to list the incompatible features. If `enabled` is `false`, the role is ignored, but is still listed in the response from the authenticate API. +- **`refresh` (Optional, Enum(true | false | "wait_for"))**: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. 
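+
+For example, a minimal call might look like the following sketch; the role name, index pattern, and privilege values are illustrative only:
+
+```ts
+// Create or overwrite a role that can monitor the cluster and read a set of indices.
+// All names and privileges below are example values, not requirements.
+const response = await client.security.putRole({
+  name: 'my_app_reader',
+  cluster: ['monitor'],
+  indices: [
+    {
+      names: ['my-app-*'],
+      privileges: ['read', 'view_index_metadata']
+    }
+  ],
+  refresh: 'wait_for'
+})
+console.log(response)
+```
+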
- * **`node_id` (string)**: The node id of node to be removed from the shutdown state - * **`master_timeout` (Optional, Enum("nanos" | "micros" | "ms" | "s" | "m" | "h" | "d"))**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. - * **`timeout` (Optional, Enum("nanos" | "micros" | "ms" | "s" | "m" | "h" | "d"))**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. +## client.security.putRoleMapping [_security.put_role_mapping] +Create or update role mappings. +Role mappings define which roles are assigned to each user. +Each mapping has rules that identify users and a list of roles that are granted to those users. +The role mapping APIs are generally the preferred way to manage role mappings rather than using role mapping files. The create or update role mappings API cannot update role mappings that are defined in role mapping files. +NOTE: This API does not create roles. Rather, it maps users to existing roles. +Roles can be created by using the create or update roles API or roles files. -### get_node [_get_node] +**Role templates** -Get the shutdown status. +The most common use for role mappings is to create a mapping from a known value on the user to a fixed role name. +For example, all users in the `cn=admin,dc=example,dc=com` LDAP group should be given the superuser role in Elasticsearch. +The `roles` field is used for this purpose. -Get information about nodes that are ready to be shut down, have shut down preparations still in progress, or have stalled. The API returns status information for each part of the shut down process. +For more complex needs, it is possible to use Mustache templates to dynamically determine the names of the roles that should be granted to the user. +The `role_templates` field is used for this purpose. -::::{note} -This feature is designed for indirect use by Elasticsearch Service, Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not supported. -:::: +NOTE: To use role templates successfully, the relevant scripting feature must be enabled. +Otherwise, all attempts to create a role mapping with role templates fail. +All of the user fields that are available in the role mapping rules are also available in the role templates. +Thus it is possible to assign a user to a role that reflects their username, their groups, or the name of the realm to which they authenticated. -If the operator privileges feature is enabled, you must be an operator to use this API. +By default a template is evaluated to produce a single string that is the name of the role which should be assigned to the user. +If the format of the template is set to "json" then the template is expected to produce a JSON string or an array of JSON strings for the role names. -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-shutdown-get-node) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-security-put-role-mapping) ```ts -client.shutdown.getNode({ ... }) +client.security.putRoleMapping({ name }) ``` +### Arguments [_arguments_security.put_role_mapping] + +#### Request (object) [_request_security.put_role_mapping] +- **`name` (string)**: The distinct name that identifies the role mapping. 
+The name is used solely as an identifier to facilitate interaction via the API; it does not affect the behavior of the mapping in any way. +- **`enabled` (Optional, boolean)**: Mappings that have `enabled` set to `false` are ignored when role mapping is performed. +- **`metadata` (Optional, Record)**: Additional metadata that helps define which roles are assigned to each user. +Within the metadata object, keys beginning with `_` are reserved for system usage. +- **`roles` (Optional, string[])**: A list of role names that are granted to the users that match the role mapping rules. +Exactly one of `roles` or `role_templates` must be specified. +- **`role_templates` (Optional, { format, template }[])**: A list of Mustache templates that will be evaluated to determine the roles names that should granted to the users that match the role mapping rules. +Exactly one of `roles` or `role_templates` must be specified. +- **`rules` (Optional, { any, all, field, except })**: The rules that determine which users should be matched by the mapping. +A rule is a logical condition that is expressed by using a JSON DSL. +- **`run_as` (Optional, string[])** +- **`refresh` (Optional, Enum(true | false | "wait_for"))**: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. + +## client.security.putUser [_security.put_user] +Create or update users. -### Arguments [_arguments_418] +Add and update users in the native realm. +A password is required for adding a new user but is optional when updating an existing user. +To change a user's password without updating any other fields, use the change password API. -* **Request (object):** +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-security-put-user) - * **`node_id` (Optional, string | string[])**: Which node for which to retrieve the shutdown status - * **`master_timeout` (Optional, Enum("nanos" | "micros" | "ms" | "s" | "m" | "h" | "d"))**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. +```ts +client.security.putUser({ username }) +``` +### Arguments [_arguments_security.put_user] + +#### Request (object) [_request_security.put_user] +- **`username` (string)**: An identifier for the user. + +NOTE: Usernames must be at least 1 and no more than 507 characters. +They can contain alphanumeric characters (a-z, A-Z, 0-9), spaces, punctuation, and printable symbols in the Basic Latin (ASCII) block. +Leading or trailing whitespace is not allowed. +- **`email` (Optional, string | null)**: The email of the user. +- **`full_name` (Optional, string | null)**: The full name of the user. +- **`metadata` (Optional, Record)**: Arbitrary metadata that you want to associate with the user. +- **`password` (Optional, string)**: The user's password. +Passwords must be at least 6 characters long. +When adding a user, one of `password` or `password_hash` is required. +When updating an existing user, the password is optional, so that other fields on the user (such as their roles) may be updated without modifying the user's password +- **`password_hash` (Optional, string)**: A hash of the user's password. +This must be produced using the same hashing algorithm as has been configured for password storage. 
+For more details, see the explanation of the `xpack.security.authc.password_hashing.algorithm` setting in the user cache and password hash algorithm documentation. +Using this parameter allows the client to pre-hash the password for performance and/or confidentiality reasons. +The `password` parameter and the `password_hash` parameter cannot be used in the same request. +- **`roles` (Optional, string[])**: A set of roles the user has. +The roles determine the user's access permissions. +To create a user without any roles, specify an empty list (`[]`). +- **`enabled` (Optional, boolean)**: Specifies whether the user is enabled. +- **`refresh` (Optional, Enum(true | false | "wait_for"))**: Valid values are `true`, `false`, and `wait_for`. +These values have the same meaning as in the index API, but the default value for this API is true. + +## client.security.queryApiKeys [_security.query_api_keys] +Find API keys with a query. +Get a paginated list of API keys and their information. +You can optionally filter the results with a query. -### put_node [_put_node] +To use this API, you must have at least the `manage_own_api_key` or the `read_security` cluster privileges. +If you have only the `manage_own_api_key` privilege, this API returns only the API keys that you own. +If you have the `read_security`, `manage_api_key`, or greater privileges (including `manage_security`), this API returns all API keys regardless of ownership. -Prepare a node to be shut down. +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-security-query-api-keys) -::::{note} -This feature is designed for indirect use by Elastic Cloud, Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not supported. -:::: +```ts +client.security.queryApiKeys({ ... }) +``` +### Arguments [_arguments_security.query_api_keys] + +#### Request (object) [_request_security.query_api_keys] +- **`aggregations` (Optional, Record)**: Any aggregations to run over the corpus of returned API keys. +Aggregations and queries work together. Aggregations are computed only on the API keys that match the query. +This supports only a subset of aggregation types, namely: `terms`, `range`, `date_range`, `missing`, +`cardinality`, `value_count`, `composite`, `filter`, and `filters`. +Additionally, aggregations only run over the same subset of fields that query works with. +- **`query` (Optional, { bool, exists, ids, match, match_all, prefix, range, simple_query_string, term, terms, wildcard })**: A query to filter which API keys to return. +If the query parameter is missing, it is equivalent to a `match_all` query. +The query supports a subset of query types, including `match_all`, `bool`, `term`, `terms`, `match`, +`ids`, `prefix`, `wildcard`, `exists`, `range`, and `simple_query_string`. +You can query the following public information associated with an API key: `id`, `type`, `name`, +`creation`, `expiration`, `invalidated`, `invalidation`, `username`, `realm`, and `metadata`. + +NOTE: The queryable string values associated with API keys are internally mapped as keywords. +Consequently, if no `analyzer` parameter is specified for a `match` query, then the provided match query string is interpreted as a single keyword value. +Such a match query is hence equivalent to a `term` query. +- **`from` (Optional, number)**: The starting document offset. +It must not be negative. +By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters. 
+To page through more hits, use the `search_after` parameter. +- **`sort` (Optional, string | { _score, _doc, _geo_distance, _script } | string | { _score, _doc, _geo_distance, _script }[])**: The sort definition. +Other than `id`, all public fields of an API key are eligible for sorting. +In addition, sort can also be applied to the `_doc` field to sort by index order. +- **`size` (Optional, number)**: The number of hits to return. +It must not be negative. +The `size` parameter can be set to `0`, in which case no API key matches are returned, only the aggregation results. +By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters. +To page through more hits, use the `search_after` parameter. +- **`search_after` (Optional, number | number | string | boolean | null[])**: The search after definition. +- **`with_limited_by` (Optional, boolean)**: Return the snapshot of the owner user's role descriptors associated with the API key. +An API key's actual permission is the intersection of its assigned role descriptors and the owner user's role descriptors (effectively limited by it). +An API key cannot retrieve any API key’s limited-by role descriptors (including itself) unless it has `manage_api_key` or higher privileges. +- **`with_profile_uid` (Optional, boolean)**: Determines whether to also retrieve the profile UID for the API key owner principal. +If it exists, the profile UID is returned under the `profile_uid` response field for each API key. +- **`typed_keys` (Optional, boolean)**: Determines whether aggregation names are prefixed by their respective types in the response. + +## client.security.queryRole [_security.query_role] +Find roles with a query. -If you specify a node that is offline, it will be prepared for shut down when it rejoins the cluster. +Get roles in a paginated manner. +The role management APIs are generally the preferred way to manage roles, rather than using file-based role management. +The query roles API does not retrieve roles that are defined in roles files, nor built-in ones. +You can optionally filter the results with a query. +Also, the results can be paginated and sorted. -If the operator privileges feature is enabled, you must be an operator to use this API. +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-security-query-role) -The API migrates ongoing tasks and index shards to other nodes as needed to prepare a node to be restarted or shut down and removed from the cluster. This ensures that Elasticsearch can be stopped safely with minimal disruption to the cluster. +```ts +client.security.queryRole({ ... }) +``` -You must specify the type of shutdown: `restart`, `remove`, or `replace`. If a node is already being prepared for shutdown, you can use this API to change the shutdown type. +### Arguments [_arguments_security.query_role] + +#### Request (object) [_request_security.query_role] +- **`query` (Optional, { bool, exists, ids, match, match_all, prefix, range, simple_query_string, term, terms, wildcard })**: A query to filter which roles to return. +If the query parameter is missing, it is equivalent to a `match_all` query. +The query supports a subset of query types, including `match_all`, `bool`, `term`, `terms`, `match`, +`ids`, `prefix`, `wildcard`, `exists`, `range`, and `simple_query_string`. 
+You can query the following information associated with roles: `name`, `description`, `metadata`, +`applications.application`, `applications.privileges`, and `applications.resources`. +- **`from` (Optional, number)**: The starting document offset. +It must not be negative. +By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters. +To page through more hits, use the `search_after` parameter. +- **`sort` (Optional, string | { _score, _doc, _geo_distance, _script } | string | { _score, _doc, _geo_distance, _script }[])**: The sort definition. +You can sort on `username`, `roles`, or `enabled`. +In addition, sort can also be applied to the `_doc` field to sort by index order. +- **`size` (Optional, number)**: The number of hits to return. +It must not be negative. +By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters. +To page through more hits, use the `search_after` parameter. +- **`search_after` (Optional, number | number | string | boolean | null[])**: The search after definition. + +## client.security.queryUser [_security.query_user] +Find users with a query. -::::{important} -This API does NOT terminate the Elasticsearch process. Monitor the node shutdown status to determine when it is safe to stop Elasticsearch. -:::: +Get information for users in a paginated manner. +You can optionally filter the results with a query. +NOTE: As opposed to the get user API, built-in users are excluded from the result. +This API is only for native users. -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-shutdown-put-node) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-security-query-user) ```ts -client.shutdown.putNode({ node_id, type, reason }) +client.security.queryUser({ ... }) ``` +### Arguments [_arguments_security.query_user] + +#### Request (object) [_request_security.query_user] +- **`query` (Optional, { ids, bool, exists, match, match_all, prefix, range, simple_query_string, term, terms, wildcard })**: A query to filter which users to return. +If the query parameter is missing, it is equivalent to a `match_all` query. +The query supports a subset of query types, including `match_all`, `bool`, `term`, `terms`, `match`, +`ids`, `prefix`, `wildcard`, `exists`, `range`, and `simple_query_string`. +You can query the following information associated with user: `username`, `roles`, `enabled`, `full_name`, and `email`. +- **`from` (Optional, number)**: The starting document offset. +It must not be negative. +By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters. +To page through more hits, use the `search_after` parameter. +- **`sort` (Optional, string | { _score, _doc, _geo_distance, _script } | string | { _score, _doc, _geo_distance, _script }[])**: The sort definition. +Fields eligible for sorting are: `username`, `roles`, `enabled`. +In addition, sort can also be applied to the `_doc` field to sort by index order. +- **`size` (Optional, number)**: The number of hits to return. +It must not be negative. +By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters. +To page through more hits, use the `search_after` parameter. 
+- **`search_after` (Optional, number | number | string | boolean | null[])**: The search after definition +- **`with_profile_uid` (Optional, boolean)**: Determines whether to retrieve the user profile UID, if it exists, for the users. + +## client.security.samlAuthenticate [_security.saml_authenticate] +Authenticate SAML. + +Submit a SAML response message to Elasticsearch for consumption. -### Arguments [_arguments_419] +NOTE: This API is intended for use by custom web applications other than Kibana. +If you are using Kibana, refer to the documentation for configuring SAML single-sign-on on the Elastic Stack. -* **Request (object):** +The SAML message that is submitted can be: - * **`node_id` (string)**: The node identifier. This parameter is not validated against the cluster’s active nodes. This enables you to register a node for shut down while it is offline. No error is thrown if you specify an invalid node ID. - * **`type` (Enum("restart" | "remove" | "replace"))**: Valid values are restart, remove, or replace. Use restart when you need to temporarily shut down a node to perform an upgrade, make configuration changes, or perform other maintenance. Because the node is expected to rejoin the cluster, data is not migrated off of the node. Use remove when you need to permanently remove a node from the cluster. The node is not marked ready for shutdown until data is migrated off of the node Use replace to do a 1:1 replacement of a node with another node. Certain allocation decisions will be ignored (such as disk watermarks) in the interest of true replacement of the source node with the target node. During a replace-type shutdown, rollover and index creation may result in unassigned shards, and shrink may fail until the replacement is complete. - * **`reason` (string)**: A human-readable reason that the node is being shut down. This field provides information for other cluster operators; it does not affect the shut down process. - * **`allocation_delay` (Optional, string)**: Only valid if type is restart. Controls how long Elasticsearch will wait for the node to restart and join the cluster before reassigning its shards to other nodes. This works the same as delaying allocation with the index.unassigned.node_left.delayed_timeout setting. If you specify both a restart allocation delay and an index-level allocation delay, the longer of the two is used. - * **`target_node_name` (Optional, string)**: Only valid if type is replace. Specifies the name of the node that is replacing the node being shut down. Shards from the shut down node are only allowed to be allocated to the target node, and no other data will be allocated to the target node. During relocation of data certain allocation rules are ignored, such as disk watermarks or user attribute filtering rules. - * **`master_timeout` (Optional, Enum("nanos" | "micros" | "ms" | "s" | "m" | "h" | "d"))**: The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. - * **`timeout` (Optional, Enum("nanos" | "micros" | "ms" | "s" | "m" | "h" | "d"))**: The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. +* A response to a SAML authentication request that was previously created using the SAML prepare authentication API. +* An unsolicited SAML message in the case of an IdP-initiated single sign-on (SSO) flow. 
+In either case, the SAML message needs to be a base64 encoded XML document with a root element of ``. +After successful validation, Elasticsearch responds with an Elasticsearch internal access token and refresh token that can be subsequently used for authentication. +This API endpoint essentially exchanges SAML responses that indicate successful authentication in the IdP for Elasticsearch access and refresh tokens, which can be used for authentication against Elasticsearch. -## simulate [_simulate_2] +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-security-saml-authenticate) +```ts +client.security.samlAuthenticate({ content, ids }) +``` -### ingest [_ingest_2] +### Arguments [_arguments_security.saml_authenticate] -Simulate data ingestion. Run ingest pipelines against a set of provided documents, optionally with substitute pipeline definitions, to simulate ingesting data into an index. +#### Request (object) [_request_security.saml_authenticate] +- **`content` (string)**: The SAML response as it was sent by the user's browser, usually a Base64 encoded XML document. +- **`ids` (string | string[])**: A JSON array with all the valid SAML Request Ids that the caller of the API has for the current user. +- **`realm` (Optional, string)**: The name of the realm that should authenticate the SAML response. Useful in cases where many SAML realms are defined. -This API is meant to be used for troubleshooting or pipeline development, as it does not actually index any data into Elasticsearch. +## client.security.samlCompleteLogout [_security.saml_complete_logout] +Logout of SAML completely. -The API runs the default and final pipeline for that index against a set of documents provided in the body of the request. If a pipeline contains a reroute processor, it follows that reroute processor to the new index, running that index’s pipelines as well the same way that a non-simulated ingest would. No data is indexed into Elasticsearch. Instead, the transformed document is returned, along with the list of pipelines that have been run and the name of the index where the document would have been indexed if this were not a simulation. The transformed document is validated against the mappings that would apply to this index, and any validation error is reported in the result. +Verifies the logout response sent from the SAML IdP. -This API differs from the simulate pipeline API in that you specify a single pipeline for that API, and it runs only that one pipeline. The simulate pipeline API is more useful for developing a single pipeline, while the simulate ingest API is more useful for troubleshooting the interaction of the various pipelines that get applied when ingesting into an index. +NOTE: This API is intended for use by custom web applications other than Kibana. +If you are using Kibana, refer to the documentation for configuring SAML single-sign-on on the Elastic Stack. -By default, the pipeline definitions that are currently in the system are used. However, you can supply substitute pipeline definitions in the body of the request. These will be used in place of the pipeline definitions that are already in the system. This can be used to replace existing pipeline definitions or to create new ones. The pipeline substitutions are used only within this request. +The SAML IdP may send a logout response back to the SP after handling the SP-initiated SAML Single Logout. 
+This API verifies the response by ensuring the content is relevant and validating its signature. +An empty response is returned if the verification process is successful. +The response can be sent by the IdP with either the HTTP-Redirect or the HTTP-Post binding. +The caller of this API must prepare the request accordingly so that this API can handle either of them. -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-simulate-ingest) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-security-saml-complete-logout) ```ts -client.simulate.ingest({ docs }) +client.security.samlCompleteLogout({ realm, ids }) ``` +### Arguments [_arguments_security.saml_complete_logout] -### Arguments [_arguments_420] +#### Request (object) [_request_security.saml_complete_logout] +- **`realm` (string)**: The name of the SAML realm in Elasticsearch for which the configuration is used to verify the logout response. +- **`ids` (string | string[])**: A JSON array with all the valid SAML Request Ids that the caller of the API has for the current user. +- **`query_string` (Optional, string)**: If the SAML IdP sends the logout response with the HTTP-Redirect binding, this field must be set to the query string of the redirect URI. +- **`content` (Optional, string)**: If the SAML IdP sends the logout response with the HTTP-Post binding, this field must be set to the value of the SAMLResponse form parameter from the logout response. -* **Request (object):** - - * **`docs` ({ _id, _index, _source }[])**: Sample documents to test in the pipeline. - * **`index` (Optional, string)**: The index to simulate ingesting into. This value can be overridden by specifying an index on each document. If you specify this parameter in the request path, it is used for any documents that do not explicitly specify an index argument. - * **`component_template_substitutions` (Optional, Record)**: A map of component template names to substitute component template definition objects. - * **`index_template_subtitutions` (Optional, Record)**: A map of index template names to substitute index template definition objects. - * **`mapping_addition` (Optional, { all_field, date_detection, dynamic, dynamic_date_formats, dynamic_templates, _field_names, index_field, _meta, numeric_detection, properties, _routing, _size, _source, runtime, enabled, subobjects, _data_stream_timestamp })** - * **`pipeline_substitutions` (Optional, Record)**: Pipelines to test. If you don’t specify the `pipeline` request path parameter, this parameter is required. If you specify both this and the request path parameter, the API only uses the request path parameter. - * **`pipeline` (Optional, string)**: The pipeline to use as the default pipeline. This value can be used to override the default pipeline of the index. - - - -## slm [_slm] +## client.security.samlInvalidate [_security.saml_invalidate] +Invalidate SAML. +Submit a SAML LogoutRequest message to Elasticsearch for consumption. -### delete_lifecycle [_delete_lifecycle_2] +NOTE: This API is intended for use by custom web applications other than Kibana. +If you are using Kibana, refer to the documentation for configuring SAML single-sign-on on the Elastic Stack. -Delete a policy. Delete a snapshot lifecycle policy definition. This operation prevents any future snapshots from being taken but does not cancel in-progress snapshots or remove previously-taken snapshots. 
+The logout request comes from the SAML IdP during an IdP-initiated Single Logout. +The custom web application can use this API to have Elasticsearch process the `LogoutRequest`. +After successful validation of the request, Elasticsearch invalidates the access token and refresh token that correspond to that specific SAML principal and provides a URL that contains a SAML LogoutResponse message. +Thus the user can be redirected back to their IdP. -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-slm-delete-lifecycle) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-security-saml-invalidate) ```ts -client.slm.deleteLifecycle({ policy_id }) +client.security.samlInvalidate({ query_string }) ``` +### Arguments [_arguments_security.saml_invalidate] -### Arguments [_arguments_421] - -* **Request (object):** - - * **`policy_id` (string)**: The id of the snapshot lifecycle policy to remove - * **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. - * **`timeout` (Optional, string | -1 | 0)**: The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. +#### Request (object) [_request_security.saml_invalidate] +- **`query_string` (string)**: The query part of the URL that the user was redirected to by the SAML IdP to initiate the Single Logout. +This query should include a single parameter named `SAMLRequest` that contains a SAML logout request that is deflated and Base64 encoded. +If the SAML IdP has signed the logout request, the URL should include two extra parameters named `SigAlg` and `Signature` that contain the algorithm used for the signature and the signature value itself. +In order for Elasticsearch to be able to verify the IdP's signature, the value of the `query_string` field must be an exact match to the string provided by the browser. +The client application must not attempt to parse or process the string in any way. +- **`acs` (Optional, string)**: The Assertion Consumer Service URL that matches the one of the SAML realm in Elasticsearch that should be used. You must specify either this parameter or the `realm` parameter. +- **`realm` (Optional, string)**: The name of the SAML realm in Elasticsearch for which the configuration is used. You must specify either this parameter or the `acs` parameter. +## client.security.samlLogout [_security.saml_logout] +Logout of SAML. +Submits a request to invalidate an access token and refresh token. -### execute_lifecycle [_execute_lifecycle] +NOTE: This API is intended for use by custom web applications other than Kibana. +If you are using Kibana, refer to the documentation for configuring SAML single-sign-on on the Elastic Stack. -Run a policy. Immediately create a snapshot according to the snapshot lifecycle policy without waiting for the scheduled time. The snapshot policy is normally applied according to its schedule, but you might want to manually run a policy before performing an upgrade or other maintenance. +This API invalidates the tokens that were generated for a user by the SAML authenticate API. +If the SAML realm in Elasticsearch is configured accordingly and the SAML IdP supports this, the Elasticsearch response contains a URL to redirect the user to the IdP that contains a SAML logout request (starting an SP-initiated SAML Single Logout).
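+For illustration, a rough sketch of the call described above from the JavaScript client; the connection settings and token values are placeholders, and the endpoint and full argument list follow below:
+
+```ts
+import { Client } from '@elastic/elasticsearch'
+
+// Placeholder connection details; point this at your own deployment.
+const client = new Client({
+  node: 'https://localhost:9200/',
+  auth: { apiKey: 'base64-encoded-api-key' }
+})
+
+// Invalidate the tokens previously returned by client.security.samlAuthenticate().
+const logout = await client.security.samlLogout({
+  token: 'access-token-from-saml-authenticate', // placeholder
+  refresh_token: 'refresh-token-from-saml-authenticate' // placeholder
+})
+
+// If the realm and IdP support SP-initiated Single Logout, this is the IdP redirect URL.
+console.log(logout.redirect)
+```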
-[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-slm-execute-lifecycle) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-security-saml-logout) ```ts -client.slm.executeLifecycle({ policy_id }) +client.security.samlLogout({ token }) ``` +### Arguments [_arguments_security.saml_logout] -### Arguments [_arguments_422] - -* **Request (object):** - - * **`policy_id` (string)**: The id of the snapshot lifecycle policy to be executed - * **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. - * **`timeout` (Optional, string | -1 | 0)**: The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. +#### Request (object) [_request_security.saml_logout] +- **`token` (string)**: The access token that was returned as a response to calling the SAML authenticate API. +Alternatively, the most recent token that was received after refreshing the original one by using a `refresh_token`. +- **`refresh_token` (Optional, string)**: The refresh token that was returned as a response to calling the SAML authenticate API. +Alternatively, the most recent refresh token that was received after refreshing the original access token. +## client.security.samlPrepareAuthentication [_security.saml_prepare_authentication] +Prepare SAML authentication. +Create a SAML authentication request (``) as a URL string based on the configuration of the respective SAML realm in Elasticsearch. -### execute_retention [_execute_retention] +NOTE: This API is intended for use by custom web applications other than Kibana. +If you are using Kibana, refer to the documentation for configuring SAML single-sign-on on the Elastic Stack. -Run a retention policy. Manually apply the retention policy to force immediate removal of snapshots that are expired according to the snapshot lifecycle policy retention rules. The retention policy is normally applied according to its schedule. +This API returns a URL pointing to the SAML Identity Provider. +You can use the URL to redirect the browser of the user in order to continue the authentication process. +The URL includes a single parameter named `SAMLRequest`, which contains a SAML Authentication request that is deflated and Base64 encoded. +If the configuration dictates that SAML authentication requests should be signed, the URL has two extra parameters named `SigAlg` and `Signature`. +These parameters contain the algorithm used for the signature and the signature value itself. +It also returns a random string that uniquely identifies this SAML Authentication request. +The caller of this API needs to store this identifier as it needs to be used in a following step of the authentication process. -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-slm-execute-retention) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-security-saml-prepare-authentication) ```ts -client.slm.executeRetention({ ... }) +client.security.samlPrepareAuthentication({ ... }) ``` +### Arguments [_arguments_security.saml_prepare_authentication] -### Arguments [_arguments_423] - -* **Request (object):** - - * **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for a connection to the master node. 
If no response is received before the timeout expires, the request fails and returns an error. - * **`timeout` (Optional, string | -1 | 0)**: The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. - +#### Request (object) [_request_security.saml_prepare_authentication] +- **`acs` (Optional, string)**: The Assertion Consumer Service URL that matches the one of the SAML realms in Elasticsearch. +The realm is used to generate the authentication request. You must specify either this parameter or the `realm` parameter. +- **`realm` (Optional, string)**: The name of the SAML realm in Elasticsearch for which the configuration is used to generate the authentication request. +You must specify either this parameter or the `acs` parameter. +- **`relay_state` (Optional, string)**: A string that will be included in the redirect URL that this API returns as the `RelayState` query parameter. +If the Authentication Request is signed, this value is used as part of the signature computation. +## client.security.samlServiceProviderMetadata [_security.saml_service_provider_metadata] +Create SAML service provider metadata. -### get_lifecycle [_get_lifecycle_2] +Generate SAML metadata for a SAML 2.0 Service Provider. -Get policy information. Get snapshot lifecycle policy definitions and information about the latest snapshot attempts. +The SAML 2.0 specification provides a mechanism for Service Providers to describe their capabilities and configuration using a metadata file. +This API generates Service Provider metadata based on the configuration of a SAML realm in Elasticsearch. -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-slm-get-lifecycle) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-security-saml-service-provider-metadata) ```ts -client.slm.getLifecycle({ ... }) +client.security.samlServiceProviderMetadata({ realm_name }) ``` +### Arguments [_arguments_security.saml_service_provider_metadata] -### Arguments [_arguments_424] - -* **Request (object):** - - * **`policy_id` (Optional, string | string[])**: List of snapshot lifecycle policies to retrieve - * **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. - * **`timeout` (Optional, string | -1 | 0)**: The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. - +#### Request (object) [_request_security.saml_service_provider_metadata] +- **`realm_name` (string)**: The name of the SAML realm in Elasticsearch. +## client.security.suggestUserProfiles [_security.suggest_user_profiles] +Suggest a user profile. -### get_stats [_get_stats] +Get suggestions for user profiles that match specified search criteria. -Get snapshot lifecycle management statistics. Get global and policy-level statistics about actions taken by snapshot lifecycle management. +NOTE: The user profile feature is designed only for use by Kibana and Elastic's Observability, Enterprise Search, and Elastic Security solutions. +Individual users and external applications should not call this API directly. +Elastic reserves the right to change or remove this feature in future releases without prior notice. 
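+As an illustrative sketch only (the user profile feature is reserved for Elastic solutions, per the note above), a call might look like the following, assuming a `client` instance configured as in the earlier samlLogout sketch; the name and hint values are made up, and the endpoint and full argument list follow below:
+
+```ts
+// Suggest up to five profiles whose name-related fields match "jack",
+// ranking profiles that carry the hinted label higher.
+const suggestions = await client.security.suggestUserProfiles({
+  name: 'jack', // hypothetical search string
+  size: 5,
+  hint: {
+    labels: { direction: ['north'] } // hypothetical label filter
+  }
+})
+
+console.log(suggestions.profiles)
+```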
-[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-slm-get-stats) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-security-suggest-user-profiles) ```ts -client.slm.getStats({ ... }) +client.security.suggestUserProfiles({ ... }) ``` +### Arguments [_arguments_security.suggest_user_profiles] -### Arguments [_arguments_425] +#### Request (object) [_request_security.suggest_user_profiles] +- **`name` (Optional, string)**: A query string used to match name-related fields in user profile documents. +Name-related fields are the user's `username`, `full_name`, and `email`. +- **`size` (Optional, number)**: The number of profiles to return. +- **`data` (Optional, string | string[])**: A list of filters for the `data` field of the profile document. +To return all content, use `data=*`. +To return a subset of content, use `data=` to retrieve content nested under the specified ``. +By default, the API returns no `data` content. +It is an error to specify `data` as both the query parameter and the request body field. +- **`hint` (Optional, { uids, labels })**: Extra search criteria to improve relevance of the suggestion result. +Profiles matching the specified hint are ranked higher in the response. +Profiles not matching the hint aren't excluded from the response as long as the profile matches the `name` field query. -* **Request (object):** +## client.security.updateApiKey [_security.update_api_key] +Update an API key. - * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. +Update attributes of an existing API key. +This API supports updates to an API key's access scope, expiration, and metadata. - * **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. +To use this API, you must have at least the `manage_own_api_key` cluster privilege. +Users can only update API keys that they created or that were granted to them. +To update another user’s API key, use the `run_as` feature to submit a request on behalf of another user. +IMPORTANT: It's not possible to use an API key as the authentication credential for this API. The owner user’s credentials are required. -### get_status [_get_status_3] +Use this API to update API keys created by the create API key or grant API Key APIs. +If you need to apply the same update to many API keys, you can use the bulk update API keys API to reduce overhead. +It's not possible to update expired API keys or API keys that have been invalidated by the invalidate API key API. -Get the snapshot lifecycle management status. +The access scope of an API key is derived from the `role_descriptors` you specify in the request and a snapshot of the owner user's permissions at the time of the request. +The snapshot of the owner's permissions is updated automatically on every call. -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-slm-get-status) +IMPORTANT: If you don't specify `role_descriptors` in the request, a call to this API might still change the API key's access scope. +This change can occur if the owner user's permissions have changed since the API key was created or last modified.
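+For example, a minimal sketch of narrowing an existing key's privileges and replacing its metadata, assuming a `client` instance configured as in the earlier samlLogout sketch; the key ID, role name, and index patterns are placeholders, and the endpoint and full argument list follow below:
+
+```ts
+// Restrict the key to read-only access on logs-* and replace its metadata.
+const response = await client.security.updateApiKey({
+  id: 'VuaCfGcBCdbkQm-e5aOx', // placeholder API key ID
+  role_descriptors: {
+    'logs-read-only': {
+      cluster: ['monitor'],
+      indices: [{ names: ['logs-*'], privileges: ['read'] }]
+    }
+  },
+  metadata: { environment: 'production' } // fully replaces any existing metadata
+})
+
+console.log(response.updated) // true if the key was changed
+```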
+ +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-security-update-api-key) ```ts -client.slm.getStatus({ ... }) +client.security.updateApiKey({ id }) ``` +### Arguments [_arguments_security.update_api_key] + +#### Request (object) [_request_security.update_api_key] +- **`id` (string)**: The ID of the API key to update. +- **`role_descriptors` (Optional, Record)**: The role descriptors to assign to this API key. +The API key's effective permissions are an intersection of its assigned privileges and the point in time snapshot of permissions of the owner user. +You can assign new privileges by specifying them in this parameter. +To remove assigned privileges, you can supply an empty `role_descriptors` parameter, that is to say, an empty object `{}`. +If an API key has no assigned privileges, it inherits the owner user's full permissions. +The snapshot of the owner's permissions is always updated, whether you supply the `role_descriptors` parameter or not. +The structure of a role descriptor is the same as the request for the create API keys API. +- **`metadata` (Optional, Record)**: Arbitrary metadata that you want to associate with the API key. +It supports a nested data structure. +Within the metadata object, keys beginning with `_` are reserved for system usage. +When specified, this value fully replaces the metadata previously associated with the API key. +- **`expiration` (Optional, string | -1 | 0)**: The expiration time for the API key. +By default, API keys never expire. +This property can be omitted to leave the expiration unchanged. + +## client.security.updateCrossClusterApiKey [_security.update_cross_cluster_api_key] +Update a cross-cluster API key. -### Arguments [_arguments_426] - -* **Request (object):** +Update the attributes of an existing cross-cluster API key, which is used for API key based remote cluster access. - * **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. To indicate that the request should never timeout, set it to `-1`. - * **`timeout` (Optional, string | -1 | 0)**: The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. To indicate that the request should never timeout, set it to `-1`. +To use this API, you must have at least the `manage_security` cluster privilege. +Users can only update API keys that they created. +To update another user's API key, use the `run_as` feature to submit a request on behalf of another user. +IMPORTANT: It's not possible to use an API key as the authentication credential for this API. +To update an API key, the owner user's credentials are required. +It's not possible to update expired API keys, or API keys that have been invalidated by the invalidate API key API. -### put_lifecycle [_put_lifecycle_2] +This API supports updates to an API key's access scope, metadata, and expiration. +The owner user's information, such as the `username` and `realm`, is also updated automatically on every call. -Create or update a policy. Create or update a snapshot lifecycle policy. If the policy already exists, this request increments the policy version. Only the latest version of a policy is stored. +NOTE: This API cannot update REST API keys, which should be updated by either the update API key or bulk update API keys API. 
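+As a sketch, granting a cross-cluster key search-only access to a hypothetical set of indices might look like this, again assuming a configured `client`; the key ID and index patterns are placeholders, and the endpoint and full argument list follow below:
+
+```ts
+const result = await client.security.updateCrossClusterApiKey({
+  id: 'cross-cluster-key-id', // placeholder cross-cluster API key ID
+  access: {
+    // The new access assignment fully replaces the previous one.
+    search: [{ names: ['logs-*', 'metrics-*'] }]
+  },
+  metadata: { purpose: 'cross-cluster search only' }
+})
+
+console.log(result.updated)
+```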
-[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-slm-put-lifecycle) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-security-update-cross-cluster-api-key) ```ts -client.slm.putLifecycle({ policy_id }) +client.security.updateCrossClusterApiKey({ id, access }) ``` +### Arguments [_arguments_security.update_cross_cluster_api_key] -### Arguments [_arguments_427] - -* **Request (object):** - - * **`policy_id` (string)**: The identifier for the snapshot lifecycle policy you want to create or update. - * **`config` (Optional, { ignore_unavailable, indices, include_global_state, feature_states, metadata, partial })**: Configuration for each snapshot created by the policy. - * **`name` (Optional, string)**: Name automatically assigned to each snapshot created by the policy. Date math is supported. To prevent conflicting snapshot names, a UUID is automatically appended to each snapshot name. - * **`repository` (Optional, string)**: Repository used to store snapshots created by this policy. This repository must exist prior to the policy’s creation. You can create a repository using the snapshot repository API. - * **`retention` (Optional, { expire_after, max_count, min_count })**: Retention rules used to retain and delete snapshots created by the policy. - * **`schedule` (Optional, string)**: Periodic or absolute schedule at which the policy creates snapshots. SLM applies schedule changes immediately. - * **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. To indicate that the request should never timeout, set it to `-1`. - * **`timeout` (Optional, string | -1 | 0)**: The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. To indicate that the request should never timeout, set it to `-1`. +#### Request (object) [_request_security.update_cross_cluster_api_key] +- **`id` (string)**: The ID of the cross-cluster API key to update. +- **`access` ({ replication, search })**: The access to be granted to this API key. +The access is composed of permissions for cross cluster search and cross cluster replication. +At least one of them must be specified. +When specified, the new access assignment fully replaces the previously assigned access. +- **`expiration` (Optional, string | -1 | 0)**: The expiration time for the API key. +By default, API keys never expire. This property can be omitted to leave the value unchanged. +- **`metadata` (Optional, Record)**: Arbitrary metadata that you want to associate with the API key. +It supports nested data structure. +Within the metadata object, keys beginning with `_` are reserved for system usage. +When specified, this information fully replaces metadata previously associated with the API key. +## client.security.updateSettings [_security.update_settings] +Update security index settings. +Update the user-configurable settings for the security internal index (`.security` and associated indices). Only a subset of settings are allowed to be modified. This includes `index.auto_expand_replicas` and `index.number_of_replicas`. -### start [_start_2] +NOTE: If `index.auto_expand_replicas` is set, `index.number_of_replicas` will be ignored during updates. -Start snapshot lifecycle management. 
Snapshot lifecycle management (SLM) starts automatically when a cluster is formed. Manually starting SLM is necessary only if it has been stopped using the stop SLM API. +If a specific index is not in use on the system and settings are provided for it, the request will be rejected. +This API does not yet support configuring the settings for indices before they are in use. -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-slm-start) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-security-update-settings) ```ts -client.slm.start({ ... }) +client.security.updateSettings({ ... }) ``` +### Arguments [_arguments_security.update_settings] -### Arguments [_arguments_428] +#### Request (object) [_request_security.update_settings] +- **`security` (Optional, { index })**: Settings for the index used for most security configuration, including native realm users and roles configured with the API. +- **`security-profile` (Optional, { index })**: Settings for the index used to store profile information. +- **`security-tokens` (Optional, { index })**: Settings for the index used to store tokens. +- **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. +- **`timeout` (Optional, string | -1 | 0)**: The period to wait for a response. +If no response is received before the timeout expires, the request fails and returns an error. -* **Request (object):** +## client.security.updateUserProfileData [_security.update_user_profile_data] +Update user profile data. - * **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. To indicate that the request should never timeout, set it to `-1`. - * **`timeout` (Optional, string | -1 | 0)**: The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. To indicate that the request should never timeout, set it to `-1`. +Update specific data for the user profile that is associated with a unique ID. +NOTE: The user profile feature is designed only for use by Kibana and Elastic's Observability, Enterprise Search, and Elastic Security solutions. +Individual users and external applications should not call this API directly. +Elastic reserves the right to change or remove this feature in future releases without prior notice. +To use this API, you must have one of the following privileges: -### stop [_stop_2] +* The `manage_user_profile` cluster privilege. +* The `update_profile_data` global privilege for the namespaces that are referenced in the request. -Stop snapshot lifecycle management. Stop all snapshot lifecycle management (SLM) operations and the SLM plugin. This API is useful when you are performing maintenance on a cluster and need to prevent SLM from performing any actions on your data streams or indices. Stopping SLM does not stop any snapshots that are in progress. You can manually trigger snapshots with the run snapshot lifecycle policy API even if SLM is stopped. +This API updates the `labels` and `data` fields of an existing user profile document with JSON objects. +New keys and their values are added to the profile document and conflicting keys are replaced by data that's included in the request. 
-The API returns a response as soon as the request is acknowledged, but the plugin might continue to run until in-progress operations complete and it can be safely stopped. Use the get snapshot lifecycle management status API to see if SLM is running. +For both labels and data, content is namespaced by the top-level fields. +The `update_profile_data` global privilege grants privileges for updating only the allowed namespaces. -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-slm-stop) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-security-update-user-profile-data) ```ts -client.slm.stop({ ... }) +client.security.updateUserProfileData({ uid }) ``` +### Arguments [_arguments_security.update_user_profile_data] + +#### Request (object) [_request_security.update_user_profile_data] +- **`uid` (string)**: A unique identifier for the user profile. +- **`labels` (Optional, Record)**: Searchable data that you want to associate with the user profile. +This field supports a nested data structure. +Within the labels object, top-level keys cannot begin with an underscore (`_`) or contain a period (`.`). +- **`data` (Optional, Record)**: Non-searchable data that you want to associate with the user profile. +This field supports a nested data structure. +Within the `data` object, top-level keys cannot begin with an underscore (`_`) or contain a period (`.`). +The data object is not searchable, but can be retrieved with the get user profile API. +- **`if_seq_no` (Optional, number)**: Only perform the operation if the document has this sequence number. +- **`if_primary_term` (Optional, number)**: Only perform the operation if the document has this primary term. +- **`refresh` (Optional, Enum(true | false | "wait_for"))**: If 'true', Elasticsearch refreshes the affected shards to make this operation +visible to search. +If 'wait_for', it waits for a refresh to make this operation visible to search. +If 'false', nothing is done with refreshes. + +## client.shutdown.deleteNode [_shutdown.delete_node] +Cancel node shutdown preparations. +Remove a node from the shutdown list so it can resume normal operations. +You must explicitly clear the shutdown request when a node rejoins the cluster or when a node has permanently left the cluster. +Shutdown requests are never removed automatically by Elasticsearch. + +NOTE: This feature is designed for indirect use by Elastic Cloud, Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. +Direct use is not supported. -### Arguments [_arguments_429] +If the operator privileges feature is enabled, you must be an operator to use this API. -* **Request (object):** +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-shutdown-delete-node) - * **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. To indicate that the request should never timeout, set it to `-1`. - * **`timeout` (Optional, string | -1 | 0)**: The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. To indicate that the request should never timeout, set it to `-1`. 
+```ts +client.shutdown.deleteNode({ node_id }) +``` +### Arguments [_arguments_shutdown.delete_node] +#### Request (object) [_request_shutdown.delete_node] +- **`node_id` (string)**: The node id of node to be removed from the shutdown state +- **`master_timeout` (Optional, Enum("nanos" | "micros" | "ms" | "s" | "m" | "h" | "d"))**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. +- **`timeout` (Optional, Enum("nanos" | "micros" | "ms" | "s" | "m" | "h" | "d"))**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. -## snapshot [_snapshot] +## client.shutdown.getNode [_shutdown.get_node] +Get the shutdown status. +Get information about nodes that are ready to be shut down, have shut down preparations still in progress, or have stalled. +The API returns status information for each part of the shut down process. -### cleanup_repository [_cleanup_repository] +NOTE: This feature is designed for indirect use by Elasticsearch Service, Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not supported. -Clean up the snapshot repository. Trigger the review of the contents of a snapshot repository and delete any stale data not referenced by existing snapshots. +If the operator privileges feature is enabled, you must be an operator to use this API. -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-cleanup-repository) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-shutdown-get-node) ```ts -client.snapshot.cleanupRepository({ repository }) +client.shutdown.getNode({ ... }) ``` +### Arguments [_arguments_shutdown.get_node] + +#### Request (object) [_request_shutdown.get_node] +- **`node_id` (Optional, string | string[])**: Which node for which to retrieve the shutdown status +- **`master_timeout` (Optional, Enum("nanos" | "micros" | "ms" | "s" | "m" | "h" | "d"))**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. -### Arguments [_arguments_430] +## client.shutdown.putNode [_shutdown.put_node] +Prepare a node to be shut down. -* **Request (object):** +NOTE: This feature is designed for indirect use by Elastic Cloud, Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not supported. - * **`repository` (string)**: The name of the snapshot repository to clean up. - * **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for a connection to the master node. If the master node is not available before the timeout expires, the request fails and returns an error. To indicate that the request should never timeout, set it to `-1` - * **`timeout` (Optional, string | -1 | 0)**: The period to wait for a response from all relevant nodes in the cluster after updating the cluster metadata. If no response is received before the timeout expires, the cluster metadata update still applies but the response will indicate that it was not completely acknowledged. To indicate that the request should never timeout, set it to `-1`. +If you specify a node that is offline, it will be prepared for shut down when it rejoins the cluster. +If the operator privileges feature is enabled, you must be an operator to use this API. 
+The API migrates ongoing tasks and index shards to other nodes as needed to prepare a node to be restarted or shut down and removed from the cluster. +This ensures that Elasticsearch can be stopped safely with minimal disruption to the cluster. -### clone [_clone_2] +You must specify the type of shutdown: `restart`, `remove`, or `replace`. +If a node is already being prepared for shutdown, you can use this API to change the shutdown type. -Clone a snapshot. Clone part of all of a snapshot into another snapshot in the same repository. +IMPORTANT: This API does NOT terminate the Elasticsearch process. +Monitor the node shutdown status to determine when it is safe to stop Elasticsearch. -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-clone) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-shutdown-put-node) ```ts -client.snapshot.clone({ repository, snapshot, target_snapshot, indices }) +client.shutdown.putNode({ node_id, type, reason }) ``` +### Arguments [_arguments_shutdown.put_node] + +#### Request (object) [_request_shutdown.put_node] +- **`node_id` (string)**: The node identifier. +This parameter is not validated against the cluster's active nodes. +This enables you to register a node for shut down while it is offline. +No error is thrown if you specify an invalid node ID. +- **`type` (Enum("restart" | "remove" | "replace"))**: Valid values are restart, remove, or replace. +Use restart when you need to temporarily shut down a node to perform an upgrade, make configuration changes, or perform other maintenance. +Because the node is expected to rejoin the cluster, data is not migrated off of the node. +Use remove when you need to permanently remove a node from the cluster. +The node is not marked ready for shutdown until data is migrated off of the node. Use replace to do a 1:1 replacement of a node with another node. +Certain allocation decisions will be ignored (such as disk watermarks) in the interest of true replacement of the source node with the target node. +During a replace-type shutdown, rollover and index creation may result in unassigned shards, and shrink may fail until the replacement is complete. +- **`reason` (string)**: A human-readable reason that the node is being shut down. +This field provides information for other cluster operators; it does not affect the shut down process. +- **`allocation_delay` (Optional, string)**: Only valid if type is restart. +Controls how long Elasticsearch will wait for the node to restart and join the cluster before reassigning its shards to other nodes. +This works the same as delaying allocation with the index.unassigned.node_left.delayed_timeout setting. +If you specify both a restart allocation delay and an index-level allocation delay, the longer of the two is used. +- **`target_node_name` (Optional, string)**: Only valid if type is replace. +Specifies the name of the node that is replacing the node being shut down. +Shards from the shut down node are only allowed to be allocated to the target node, and no other data will be allocated to the target node. +During relocation of data certain allocation rules are ignored, such as disk watermarks or user attribute filtering rules. +- **`master_timeout` (Optional, Enum("nanos" | "micros" | "ms" | "s" | "m" | "h" | "d"))**: The period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error.
+- **`timeout` (Optional, Enum("nanos" | "micros" | "ms" | "s" | "m" | "h" | "d"))**: The period to wait for a response. +If no response is received before the timeout expires, the request fails and returns an error. + +## client.simulate.ingest [_simulate.ingest] +Simulate data ingestion. +Run ingest pipelines against a set of provided documents, optionally with substitute pipeline definitions, to simulate ingesting data into an index. + +This API is meant to be used for troubleshooting or pipeline development, as it does not actually index any data into Elasticsearch. + +The API runs the default and final pipeline for that index against a set of documents provided in the body of the request. +If a pipeline contains a reroute processor, it follows that reroute processor to the new index, running that index's pipelines as well the same way that a non-simulated ingest would. +No data is indexed into Elasticsearch. +Instead, the transformed document is returned, along with the list of pipelines that have been run and the name of the index where the document would have been indexed if this were not a simulation. +The transformed document is validated against the mappings that would apply to this index, and any validation error is reported in the result. -### Arguments [_arguments_431] +This API differs from the simulate pipeline API in that you specify a single pipeline for that API, and it runs only that one pipeline. +The simulate pipeline API is more useful for developing a single pipeline, while the simulate ingest API is more useful for troubleshooting the interaction of the various pipelines that get applied when ingesting into an index. -* **Request (object):** +By default, the pipeline definitions that are currently in the system are used. +However, you can supply substitute pipeline definitions in the body of the request. +These will be used in place of the pipeline definitions that are already in the system. This can be used to replace existing pipeline definitions or to create new ones. The pipeline substitutions are used only within this request. - * **`repository` (string)**: The name of the snapshot repository that both source and target snapshot belong to. - * **`snapshot` (string)**: The source snapshot name. - * **`target_snapshot` (string)**: The target snapshot name. - * **`indices` (string)**: A list of indices to include in the snapshot. Multi-target syntax is supported. - * **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for the master node. If the master node is not available before the timeout expires, the request fails and returns an error. To indicate that the request should never timeout, set it to `-1`. - * **`timeout` (Optional, string | -1 | 0)**: The period of time to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-simulate-ingest) +```ts +client.simulate.ingest({ docs }) +``` +### Arguments [_arguments_simulate.ingest] -### create [_create_3] +#### Request (object) [_request_simulate.ingest] +- **`docs` ({ _id, _index, _source }[])**: Sample documents to test in the pipeline. +- **`index` (Optional, string)**: The index to simulate ingesting into. +This value can be overridden by specifying an index on each document. +If you specify this parameter in the request path, it is used for any documents that do not explicitly specify an index argument. 
+- **`component_template_substitutions` (Optional, Record)**: A map of component template names to substitute component template definition objects. +- **`index_template_substitutions` (Optional, Record)**: A map of index template names to substitute index template definition objects. +- **`mapping_addition` (Optional, { all_field, date_detection, dynamic, dynamic_date_formats, dynamic_templates, _field_names, index_field, _meta, numeric_detection, properties, _routing, _size, _source, runtime, enabled, subobjects, _data_stream_timestamp })** +- **`pipeline_substitutions` (Optional, Record)**: Pipelines to test. +If you don’t specify the `pipeline` request path parameter, this parameter is required. +If you specify both this and the request path parameter, the API only uses the request path parameter. +- **`pipeline` (Optional, string)**: The pipeline to use as the default pipeline. +This value can be used to override the default pipeline of the index. -Create a snapshot. Take a snapshot of a cluster or of data streams and indices. +## client.slm.deleteLifecycle [_slm.delete_lifecycle] +Delete a policy. +Delete a snapshot lifecycle policy definition. +This operation prevents any future snapshots from being taken but does not cancel in-progress snapshots or remove previously-taken snapshots. -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-create) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-slm-delete-lifecycle) ```ts -client.snapshot.create({ repository, snapshot }) +client.slm.deleteLifecycle({ policy_id }) ``` +### Arguments [_arguments_slm.delete_lifecycle] -### Arguments [_arguments_432] +#### Request (object) [_request_slm.delete_lifecycle] +- **`policy_id` (string)**: The id of the snapshot lifecycle policy to remove +- **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. +- **`timeout` (Optional, string | -1 | 0)**: The period to wait for a response. +If no response is received before the timeout expires, the request fails and returns an error. -* **Request (object):** +## client.slm.executeLifecycle [_slm.execute_lifecycle] +Run a policy. +Immediately create a snapshot according to the snapshot lifecycle policy without waiting for the scheduled time. +The snapshot policy is normally applied according to its schedule, but you might want to manually run a policy before performing an upgrade or other maintenance. - * **`repository` (string)**: The name of the repository for the snapshot. - * **`snapshot` (string)**: The name of the snapshot. It supportes date math. It must be unique in the repository. - * **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Determines how wildcard patterns in the `indices` parameter match data streams and indices. It supports a list of values such as `open,hidden`. - * **`feature_states` (Optional, string[])**: The feature states to include in the snapshot. Each feature state includes one or more system indices containing related data. You can view a list of eligible features using the get features API. 
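+For example, a minimal sketch of triggering a policy ahead of maintenance, assuming a configured `client` and a hypothetical policy named `daily-snapshots`; the endpoint and argument list follow below:
+
+```ts
+// Take a snapshot now instead of waiting for the policy's schedule.
+const run = await client.slm.executeLifecycle({
+  policy_id: 'daily-snapshots' // hypothetical policy ID
+})
+
+// Name of the snapshot that SLM started creating.
+console.log(run.snapshot_name)
+```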
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-slm-execute-lifecycle) +```ts +client.slm.executeLifecycle({ policy_id }) +``` -If `include_global_state` is `true`, all current feature states are included by default. If `include_global_state` is `false`, no feature states are included by default. +### Arguments [_arguments_slm.execute_lifecycle] -Note that specifying an empty array will result in the default behavior. To exclude all feature states, regardless of the `include_global_state` value, specify an array with only the value `none` (`["none"]`). ** *`ignore_unavailable` (Optional, boolean)**: If `true`, the request ignores data streams and indices in `indices` that are missing or closed. If `false`, the request returns an error for any data stream or index that is missing or closed. *** *`include_global_state` (Optional, boolean)**: If `true`, the current cluster state is included in the snapshot. The cluster state includes persistent cluster settings, composable index templates, legacy index templates, ingest pipelines, and ILM policies. It also includes data stored in system indices, such as Watches and task records (configurable via `feature_states`). ** *`indices` (Optional, string | string[])**: A list of data streams and indices to include in the snapshot. It supports a multi-target syntax. The default is an empty array (`[]`), which includes all regular data streams and regular indices. To exclude all data streams and indices, use `-*`. +#### Request (object) [_request_slm.execute_lifecycle] +- **`policy_id` (string)**: The id of the snapshot lifecycle policy to be executed +- **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. +- **`timeout` (Optional, string | -1 | 0)**: The period to wait for a response. +If no response is received before the timeout expires, the request fails and returns an error. -You can’t use this parameter to include or exclude system indices or system data streams from a snapshot. Use `feature_states` instead. ** *`metadata` (Optional, Record)**: Arbitrary metadata to the snapshot, such as a record of who took the snapshot, why it was taken, or any other useful data. It can have any contents but it must be less than 1024 bytes. This information is not automatically generated by Elasticsearch. ** *`partial` (Optional, boolean)**: If `true`, it enables you to restore a partial snapshot of indices with unavailable shards. Only shards that were successfully included in the snapshot will be restored. All missing shards will be recreated as empty. +## client.slm.executeRetention [_slm.execute_retention] +Run a retention policy. +Manually apply the retention policy to force immediate removal of snapshots that are expired according to the snapshot lifecycle policy retention rules. +The retention policy is normally applied according to its schedule. -If `false`, the entire restore operation will fail if one or more indices included in the snapshot do not have all primary shards available. ** *`master_timeout` (Optional, string | -1 | 0)**: The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. ** *`wait_for_completion` (Optional, boolean)**: If `true`, the request returns a response when the snapshot is complete. 
If `false`, the request returns a response when the snapshot initializes. +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-slm-execute-retention) +```ts +client.slm.executeRetention({ ... }) +``` -### create_repository [_create_repository] +### Arguments [_arguments_slm.execute_retention] -Create or update a snapshot repository. IMPORTANT: If you are migrating searchable snapshots, the repository name must be identical in the source and destination clusters. To register a snapshot repository, the cluster’s global metadata must be writeable. Ensure there are no cluster blocks (for example, `cluster.blocks.read_only` and `clsuter.blocks.read_only_allow_delete` settings) that prevent write access. +#### Request (object) [_request_slm.execute_retention] +- **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. +- **`timeout` (Optional, string | -1 | 0)**: The period to wait for a response. +If no response is received before the timeout expires, the request fails and returns an error. -Several options for this API can be specified using a query parameter or a request body parameter. If both parameters are specified, only the query parameter is used. +## client.slm.getLifecycle [_slm.get_lifecycle] +Get policy information. +Get snapshot lifecycle policy definitions and information about the latest snapshot attempts. -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-create-repository) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-slm-get-lifecycle) ```ts -client.snapshot.createRepository({ repository }) +client.slm.getLifecycle({ ... }) ``` +### Arguments [_arguments_slm.get_lifecycle] -### Arguments [_arguments_433] +#### Request (object) [_request_slm.get_lifecycle] +- **`policy_id` (Optional, string | string[])**: List of snapshot lifecycle policies to retrieve +- **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. +- **`timeout` (Optional, string | -1 | 0)**: The period to wait for a response. +If no response is received before the timeout expires, the request fails and returns an error. -* **Request (object):** +## client.slm.getStats [_slm.get_stats] +Get snapshot lifecycle management statistics. +Get global and policy-level statistics about actions taken by snapshot lifecycle management. - * **`repository` (string)**: The name of the snapshot repository to register or update. - * **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for the master node. If the master node is not available before the timeout expires, the request fails and returns an error. To indicate that the request should never timeout, set it to `-1`. - * **`timeout` (Optional, string | -1 | 0)**: The period to wait for a response from all relevant nodes in the cluster after updating the cluster metadata. If no response is received before the timeout expires, the cluster metadata update still applies but the response will indicate that it was not completely acknowledged. To indicate that the request should never timeout, set it to `-1`. 
- * **`verify` (Optional, boolean)**: If `true`, the request verifies the repository is functional on all master and data nodes in the cluster. If `false`, this verification is skipped. You can also perform this verification with the verify snapshot repository API. +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-slm-get-stats) +```ts +client.slm.getStats({ ... }) +``` +### Arguments [_arguments_slm.get_stats] -### delete [_delete_9] +#### Request (object) [_request_slm.get_stats] +- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. +- **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. -Delete snapshots. +## client.slm.getStatus [_slm.get_status] +Get the snapshot lifecycle management status. -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-delete) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-slm-get-status) ```ts -client.snapshot.delete({ repository, snapshot }) +client.slm.getStatus({ ... }) ``` +### Arguments [_arguments_slm.get_status] -### Arguments [_arguments_434] +#### Request (object) [_request_slm.get_status] +- **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. +To indicate that the request should never timeout, set it to `-1`. +- **`timeout` (Optional, string | -1 | 0)**: The period to wait for a response. +If no response is received before the timeout expires, the request fails and returns an error. +To indicate that the request should never timeout, set it to `-1`. -* **Request (object):** +## client.slm.putLifecycle [_slm.put_lifecycle] +Create or update a policy. +Create or update a snapshot lifecycle policy. +If the policy already exists, this request increments the policy version. +Only the latest version of a policy is stored. - * **`repository` (string)**: The name of the repository to delete a snapshot from. - * **`snapshot` (string)**: A list of snapshot names to delete. It also accepts wildcards (`*`). - * **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for the master node. If the master node is not available before the timeout expires, the request fails and returns an error. To indicate that the request should never timeout, set it to `-1`. +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-slm-put-lifecycle) +```ts +client.slm.putLifecycle({ policy_id }) +``` +### Arguments [_arguments_slm.put_lifecycle] -### delete_repository [_delete_repository] +#### Request (object) [_request_slm.put_lifecycle] +- **`policy_id` (string)**: The identifier for the snapshot lifecycle policy you want to create or update. +- **`config` (Optional, { ignore_unavailable, indices, include_global_state, feature_states, metadata, partial })**: Configuration for each snapshot created by the policy. +- **`name` (Optional, string)**: Name automatically assigned to each snapshot created by the policy. Date math is supported. To prevent conflicting snapshot names, a UUID is automatically appended to each snapshot name. 
+- **`repository` (Optional, string)**: Repository used to store snapshots created by this policy. This repository must exist prior to the policy’s creation. You can create a repository using the snapshot repository API. +- **`retention` (Optional, { expire_after, max_count, min_count })**: Retention rules used to retain and delete snapshots created by the policy. +- **`schedule` (Optional, string)**: Periodic or absolute schedule at which the policy creates snapshots. SLM applies schedule changes immediately. +- **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. +To indicate that the request should never timeout, set it to `-1`. +- **`timeout` (Optional, string | -1 | 0)**: The period to wait for a response. +If no response is received before the timeout expires, the request fails and returns an error. +To indicate that the request should never timeout, set it to `-1`. -Delete snapshot repositories. When a repository is unregistered, Elasticsearch removes only the reference to the location where the repository is storing the snapshots. The snapshots themselves are left untouched and in place. +## client.slm.start [_slm.start] +Start snapshot lifecycle management. +Snapshot lifecycle management (SLM) starts automatically when a cluster is formed. +Manually starting SLM is necessary only if it has been stopped using the stop SLM API. -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-delete-repository) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-slm-start) ```ts -client.snapshot.deleteRepository({ repository }) +client.slm.start({ ... }) ``` +### Arguments [_arguments_slm.start] -### Arguments [_arguments_435] - -* **Request (object):** +#### Request (object) [_request_slm.start] +- **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. +To indicate that the request should never timeout, set it to `-1`. +- **`timeout` (Optional, string | -1 | 0)**: The period to wait for a response. +If no response is received before the timeout expires, the request fails and returns an error. +To indicate that the request should never timeout, set it to `-1`. - * **`repository` (string | string[])**: The ame of the snapshot repositories to unregister. Wildcard (`*`) patterns are supported. - * **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for the master node. If the master node is not available before the timeout expires, the request fails and returns an error. To indicate that the request should never timeout, set it to `-1`. - * **`timeout` (Optional, string | -1 | 0)**: The period to wait for a response from all relevant nodes in the cluster after updating the cluster metadata. If no response is received before the timeout expires, the cluster metadata update still applies but the response will indicate that it was not completely acknowledged. To indicate that the request should never timeout, set it to `-1`. +## client.slm.stop [_slm.stop] +Stop snapshot lifecycle management. +Stop all snapshot lifecycle management (SLM) operations and the SLM plugin. 
+This API is useful when you are performing maintenance on a cluster and need to prevent SLM from performing any actions on your data streams or indices. +Stopping SLM does not stop any snapshots that are in progress. +You can manually trigger snapshots with the run snapshot lifecycle policy API even if SLM is stopped. +The API returns a response as soon as the request is acknowledged, but the plugin might continue to run until in-progress operations complete and it can be safely stopped. +Use the get snapshot lifecycle management status API to see if SLM is running. +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-slm-stop) -### get [_get_9] +```ts +client.slm.stop({ ... }) +``` -Get snapshot information. +### Arguments [_arguments_slm.stop] -::::{note} -The `after` parameter and `next` field enable you to iterate through snapshots with some consistency guarantees regarding concurrent creation or deletion of snapshots. It is guaranteed that any snapshot that exists at the beginning of the iteration and is not concurrently deleted will be seen during the iteration. Snapshots concurrently created may be seen during an iteration. -:::: +#### Request (object) [_request_slm.stop] +- **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. +To indicate that the request should never timeout, set it to `-1`. +- **`timeout` (Optional, string | -1 | 0)**: The period to wait for a response. +If no response is received before the timeout expires, the request fails and returns an error. +To indicate that the request should never timeout, set it to `-1`. +## client.snapshot.cleanupRepository [_snapshot.cleanup_repository] +Clean up the snapshot repository. +Trigger the review of the contents of a snapshot repository and delete any stale data not referenced by existing snapshots. -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-get) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-snapshot-cleanup-repository) ```ts -client.snapshot.get({ repository, snapshot }) +client.snapshot.cleanupRepository({ repository }) ``` +### Arguments [_arguments_snapshot.cleanup_repository] -### Arguments [_arguments_436] +#### Request (object) [_request_snapshot.cleanup_repository] +- **`repository` (string)**: The name of the snapshot repository to clean up. +- **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for a connection to the master node. +If the master node is not available before the timeout expires, the request fails and returns an error. +To indicate that the request should never timeout, set it to `-1`. +- **`timeout` (Optional, string | -1 | 0)**: The period to wait for a response from all relevant nodes in the cluster after updating the cluster metadata. +If no response is received before the timeout expires, the cluster metadata update still applies but the response will indicate that it was not completely acknowledged. +To indicate that the request should never timeout, set it to `-1`. -* **Request (object):** +## client.snapshot.clone [_snapshot.clone] +Clone a snapshot. +Clone part or all of a snapshot into another snapshot in the same repository. - * **`repository` (string)**: A list of snapshot repository names used to limit the request. Wildcard (`*`) expressions are supported.
- * **`snapshot` (string | string[])**: A list of snapshot names to retrieve Wildcards (`*`) are supported. +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-snapshot-clone) -* To get information about all snapshots in a registered repository, use a wildcard (`*`) or `_all`. -* To get information about any snapshots that are currently running, use `_current`. +```ts +client.snapshot.clone({ repository, snapshot, target_snapshot, indices }) +``` + +### Arguments [_arguments_snapshot.clone] - * **`after` (Optional, string)**: An offset identifier to start pagination from as returned by the next field in the response body. - * **`from_sort_value` (Optional, string)**: The value of the current sort column at which to start retrieval. It can be a string `snapshot-` or a repository name when sorting by snapshot or repository name. It can be a millisecond time value or a number when sorting by `index-` or shard count. - * **`ignore_unavailable` (Optional, boolean)**: If `false`, the request returns an error for any snapshots that are unavailable. - * **`index_details` (Optional, boolean)**: If `true`, the response includes additional information about each index in the snapshot comprising the number of shards in the index, the total size of the index in bytes, and the maximum number of segments per shard in the index. The default is `false`, meaning that this information is omitted. - * **`index_names` (Optional, boolean)**: If `true`, the response includes the name of each index in each snapshot. - * **`include_repository` (Optional, boolean)**: If `true`, the response includes the repository name in each snapshot. - * **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. - * **`order` (Optional, Enum("asc" | "desc"))**: The sort order. Valid values are `asc` for ascending and `desc` for descending order. The default behavior is ascending order. - * **`offset` (Optional, number)**: Numeric offset to start pagination from based on the snapshots matching this request. Using a non-zero value for this parameter is mutually exclusive with using the after parameter. Defaults to 0. - * **`size` (Optional, number)**: The maximum number of snapshots to return. The default is 0, which means to return all that match the request without limit. - * **`slm_policy_filter` (Optional, string)**: Filter snapshots by a list of snapshot lifecycle management (SLM) policy names that snapshots belong to. +#### Request (object) [_request_snapshot.clone] +- **`repository` (string)**: The name of the snapshot repository that both source and target snapshot belong to. +- **`snapshot` (string)**: The source snapshot name. +- **`target_snapshot` (string)**: The target snapshot name. +- **`indices` (string)**: A list of indices to include in the snapshot. +Multi-target syntax is supported. +- **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for the master node. +If the master node is not available before the timeout expires, the request fails and returns an error. +To indicate that the request should never timeout, set it to `-1`. +## client.snapshot.create [_snapshot.create] +Create a snapshot. +Take a snapshot of a cluster or of data streams and indices. -You can use wildcards (`*`) and combinations of wildcards followed by exclude patterns starting with `-`. 
For example, the pattern `*,-policy-a-\*` will return all snapshots except for those that were created by an SLM policy with a name starting with `policy-a-`. Note that the wildcard pattern `*` matches all snapshots created by an SLM policy but not those snapshots that were not created by an SLM policy. To include snapshots that were not created by an SLM policy, you can use the special pattern `_none` that will match all snapshots without an SLM policy. ** *`sort` (Optional, Enum("start_time" | "duration" | "name" | "index_count" | "repository" | "shard_count" | "failed_shard_count"))**: The sort order for the result. The default behavior is sorting by snapshot start time stamp. ** *`verbose` (Optional, boolean)**: If `true`, returns additional information about each snapshot such as the version of Elasticsearch which took the snapshot, the start and end times of the snapshot, and the number of shards snapshotted. +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-snapshot-create) -::::{note} -The parameters `size`, `order`, `after`, `from_sort_value`, `offset`, `slm_policy_filter`, and `sort` are not supported when you set `verbose=false` and the sort order for requests with `verbose=false` is undefined. -:::: +```ts +client.snapshot.create({ repository, snapshot }) +``` + +### Arguments [_arguments_snapshot.create] + +#### Request (object) [_request_snapshot.create] +- **`repository` (string)**: The name of the repository for the snapshot. +- **`snapshot` (string)**: The name of the snapshot. +It supports date math. +It must be unique in the repository. +- **`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])**: Determines how wildcard patterns in the `indices` parameter match data streams and indices. +It supports a list of values such as `open,hidden`. +- **`feature_states` (Optional, string[])**: The feature states to include in the snapshot. +Each feature state includes one or more system indices containing related data. +You can view a list of eligible features using the get features API. + +If `include_global_state` is `true`, all current feature states are included by default. +If `include_global_state` is `false`, no feature states are included by default. + +Note that specifying an empty array will result in the default behavior. +To exclude all feature states, regardless of the `include_global_state` value, specify an array with only the value `none` (`["none"]`). +- **`ignore_unavailable` (Optional, boolean)**: If `true`, the request ignores data streams and indices in `indices` that are missing or closed. +If `false`, the request returns an error for any data stream or index that is missing or closed. +- **`include_global_state` (Optional, boolean)**: If `true`, the current cluster state is included in the snapshot. +The cluster state includes persistent cluster settings, composable index templates, legacy index templates, ingest pipelines, and ILM policies. +It also includes data stored in system indices, such as Watches and task records (configurable via `feature_states`). +- **`indices` (Optional, string | string[])**: A list of data streams and indices to include in the snapshot. +It supports a multi-target syntax. +The default is an empty array (`[]`), which includes all regular data streams and regular indices. +To exclude all data streams and indices, use `-*`.
+ +You can't use this parameter to include or exclude system indices or system data streams from a snapshot. +Use `feature_states` instead. +- **`metadata` (Optional, Record)**: Arbitrary metadata to the snapshot, such as a record of who took the snapshot, why it was taken, or any other useful data. +It can have any contents but it must be less than 1024 bytes. +This information is not automatically generated by Elasticsearch. +- **`partial` (Optional, boolean)**: If `true`, it enables you to restore a partial snapshot of indices with unavailable shards. +Only shards that were successfully included in the snapshot will be restored. +All missing shards will be recreated as empty. + +If `false`, the entire restore operation will fail if one or more indices included in the snapshot do not have all primary shards available. +- **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. +- **`wait_for_completion` (Optional, boolean)**: If `true`, the request returns a response when the snapshot is complete. +If `false`, the request returns a response when the snapshot initializes. + +## client.snapshot.createRepository [_snapshot.create_repository] +Create or update a snapshot repository. +IMPORTANT: If you are migrating searchable snapshots, the repository name must be identical in the source and destination clusters. +To register a snapshot repository, the cluster's global metadata must be writeable. +Ensure there are no cluster blocks (for example, `cluster.blocks.read_only` and `cluster.blocks.read_only_allow_delete` settings) that prevent write access. + +Several options for this API can be specified using a query parameter or a request body parameter. +If both parameters are specified, only the query parameter is used. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-snapshot-create-repository) +```ts +client.snapshot.createRepository({ repository }) +``` +### Arguments [_arguments_snapshot.create_repository] -### get_repository [_get_repository] +#### Request (object) [_request_snapshot.create_repository] +- **`repository` (string)**: The name of the snapshot repository to register or update. +- **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for the master node. +If the master node is not available before the timeout expires, the request fails and returns an error. +To indicate that the request should never timeout, set it to `-1`. +- **`timeout` (Optional, string | -1 | 0)**: The period to wait for a response from all relevant nodes in the cluster after updating the cluster metadata. +If no response is received before the timeout expires, the cluster metadata update still applies but the response will indicate that it was not completely acknowledged. +To indicate that the request should never timeout, set it to `-1`. +- **`verify` (Optional, boolean)**: If `true`, the request verifies the repository is functional on all master and data nodes in the cluster. +If `false`, this verification is skipped. +You can also perform this verification with the verify snapshot repository API. -Get snapshot repository information. +## client.snapshot.delete [_snapshot.delete] +Delete snapshots.
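As a quick usage sketch (the repository and snapshot names below are hypothetical placeholders, not values from this changeset), deleting a set of snapshots by wildcard might look like this:

```ts
// Sketch only: assumes `client` is an existing Client instance from
// '@elastic/elasticsearch' connected to the target cluster.
await client.snapshot.delete({
  repository: 'my_repository', // placeholder repository name
  snapshot: 'nightly-2025.*'   // `snapshot` also accepts wildcards (`*`)
})
```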
-[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-get-repository) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-snapshot-delete) ```ts -client.snapshot.getRepository({ ... }) +client.snapshot.delete({ repository, snapshot }) ``` +### Arguments [_arguments_snapshot.delete] + +#### Request (object) [_request_snapshot.delete] +- **`repository` (string)**: The name of the repository to delete a snapshot from. +- **`snapshot` (string)**: A list of snapshot names to delete. +It also accepts wildcards (`*`). +- **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for the master node. +If the master node is not available before the timeout expires, the request fails and returns an error. +To indicate that the request should never timeout, set it to `-1`. -### Arguments [_arguments_437] +## client.snapshot.deleteRepository [_snapshot.delete_repository] +Delete snapshot repositories. +When a repository is unregistered, Elasticsearch removes only the reference to the location where the repository is storing the snapshots. +The snapshots themselves are left untouched and in place. -* **Request (object):** +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-snapshot-delete-repository) - * **`repository` (Optional, string | string[])**: A list of snapshot repository names used to limit the request. Wildcard (`*`) expressions are supported including combining wildcards with exclude patterns starting with `-`. +```ts +client.snapshot.deleteRepository({ repository }) +``` +### Arguments [_arguments_snapshot.delete_repository] -To get information about all snapshot repositories registered in the cluster, omit this parameter or use `*` or `_all`. ** *`local` (Optional, boolean)**: If `true`, the request gets information from the local node only. If `false`, the request gets information from the master node. ** *`master_timeout` (Optional, string | -1 | 0)**: The period to wait for the master node. If the master node is not available before the timeout expires, the request fails and returns an error. To indicate that the request should never timeout, set it to `-1`. +#### Request (object) [_request_snapshot.delete_repository] +- **`repository` (string | string[])**: The name of the snapshot repositories to unregister. +Wildcard (`*`) patterns are supported. +- **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for the master node. +If the master node is not available before the timeout expires, the request fails and returns an error. +To indicate that the request should never timeout, set it to `-1`. +- **`timeout` (Optional, string | -1 | 0)**: The period to wait for a response from all relevant nodes in the cluster after updating the cluster metadata. +If no response is received before the timeout expires, the cluster metadata update still applies but the response will indicate that it was not completely acknowledged. +To indicate that the request should never timeout, set it to `-1`. +## client.snapshot.get [_snapshot.get] +Get snapshot information. -### repository_analyze [_repository_analyze] +NOTE: The `after` parameter and `next` field enable you to iterate through snapshots with some consistency guarantees regarding concurrent creation or deletion of snapshots. +It is guaranteed that any snapshot that exists at the beginning of the iteration and is not concurrently deleted will be seen during the iteration.
+Snapshots concurrently created may be seen during an iteration. -Analyze a snapshot repository. Analyze the performance characteristics and any incorrect behaviour found in a repository. +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-snapshot-get) -The response exposes implementation details of the analysis which may change from version to version. The response body format is therefore not considered stable and may be different in newer versions. +```ts +client.snapshot.get({ repository, snapshot }) +``` -There are a large number of third-party storage systems available, not all of which are suitable for use as a snapshot repository by Elasticsearch. Some storage systems behave incorrectly, or perform poorly, especially when accessed concurrently by multiple clients as the nodes of an Elasticsearch cluster do. This API performs a collection of read and write operations on your repository which are designed to detect incorrect behaviour and to measure the performance characteristics of your storage system. +### Arguments [_arguments_snapshot.get] -The default values for the parameters are deliberately low to reduce the impact of running an analysis inadvertently and to provide a sensible starting point for your investigations. Run your first analysis with the default parameter values to check for simple problems. If successful, run a sequence of increasingly large analyses until you encounter a failure or you reach a `blob_count` of at least `2000`, a `max_blob_size` of at least `2gb`, a `max_total_data_size` of at least `1tb`, and a `register_operation_count` of at least `100`. Always specify a generous timeout, possibly `1h` or longer, to allow time for each analysis to run to completion. Perform the analyses using a multi-node cluster of a similar size to your production cluster so that it can detect any problems that only arise when the repository is accessed by many nodes at once. +#### Request (object) [_request_snapshot.get] +- **`repository` (string)**: A list of snapshot repository names used to limit the request. +Wildcard (`*`) expressions are supported. +- **`snapshot` (string | string[])**: A list of snapshot names to retrieve +Wildcards (`*`) are supported. -If the analysis fails, Elasticsearch detected that your repository behaved unexpectedly. This usually means you are using a third-party storage system with an incorrect or incompatible implementation of the API it claims to support. If so, this storage system is not suitable for use as a snapshot repository. You will need to work with the supplier of your storage system to address the incompatibilities that Elasticsearch detects. +* To get information about all snapshots in a registered repository, use a wildcard (`*`) or `_all`. +* To get information about any snapshots that are currently running, use `_current`. +- **`after` (Optional, string)**: An offset identifier to start pagination from as returned by the next field in the response body. +- **`from_sort_value` (Optional, string)**: The value of the current sort column at which to start retrieval. +It can be a string `snapshot-` or a repository name when sorting by snapshot or repository name. +It can be a millisecond time value or a number when sorting by `index-` or shard count. +- **`ignore_unavailable` (Optional, boolean)**: If `false`, the request returns an error for any snapshots that are unavailable. 
+- **`index_details` (Optional, boolean)**: If `true`, the response includes additional information about each index in the snapshot comprising the number of shards in the index, the total size of the index in bytes, and the maximum number of segments per shard in the index. +The default is `false`, meaning that this information is omitted. +- **`index_names` (Optional, boolean)**: If `true`, the response includes the name of each index in each snapshot. +- **`include_repository` (Optional, boolean)**: If `true`, the response includes the repository name in each snapshot. +- **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. +- **`order` (Optional, Enum("asc" | "desc"))**: The sort order. +Valid values are `asc` for ascending and `desc` for descending order. +The default behavior is ascending order. +- **`offset` (Optional, number)**: Numeric offset to start pagination from based on the snapshots matching this request. Using a non-zero value for this parameter is mutually exclusive with using the after parameter. Defaults to 0. +- **`size` (Optional, number)**: The maximum number of snapshots to return. +The default is 0, which means to return all that match the request without limit. +- **`slm_policy_filter` (Optional, string)**: Filter snapshots by a list of snapshot lifecycle management (SLM) policy names that snapshots belong to. + +You can use wildcards (`*`) and combinations of wildcards followed by exclude patterns starting with `-`. +For example, the pattern `*,-policy-a-\*` will return all snapshots except for those that were created by an SLM policy with a name starting with `policy-a-`. +Note that the wildcard pattern `*` matches all snapshots created by an SLM policy but not those snapshots that were not created by an SLM policy. +To include snapshots that were not created by an SLM policy, you can use the special pattern `_none` that will match all snapshots without an SLM policy. +- **`sort` (Optional, Enum("start_time" | "duration" | "name" | "index_count" | "repository" | "shard_count" | "failed_shard_count"))**: The sort order for the result. +The default behavior is sorting by snapshot start time stamp. +- **`verbose` (Optional, boolean)**: If `true`, returns additional information about each snapshot such as the version of Elasticsearch which took the snapshot, the start and end times of the snapshot, and the number of shards snapshotted. + +NOTE: The parameters `size`, `order`, `after`, `from_sort_value`, `offset`, `slm_policy_filter`, and `sort` are not supported when you set `verbose=false` and the sort order for requests with `verbose=false` is undefined. + +## client.snapshot.getRepository [_snapshot.get_repository] +Get snapshot repository information. -If the analysis is successful, the API returns details of the testing process, optionally including how long each operation took. You can use this information to determine the performance of your storage system. If any operation fails or returns an incorrect result, the API returns an error. If the API returns an error, it may not have removed all the data it wrote to the repository. The error will indicate the location of any leftover data and this path is also recorded in the Elasticsearch logs. You should verify that this location has been cleaned up correctly. If there is still leftover data at the specified location, you should manually remove it. 
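A small usage sketch for the repository lookup described below (the repository name is a hypothetical placeholder); omitting `repository` returns every registered repository, while a name, wildcard, or `_all` narrows the result:

```ts
// Sketch only: `client` is an existing Client instance from '@elastic/elasticsearch'.
const allRepos = await client.snapshot.getRepository()
const oneRepo = await client.snapshot.getRepository({ repository: 'my_repository' })
console.log(Object.keys(allRepos), oneRepo)
```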
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-snapshot-get-repository) -If the connection from your client to Elasticsearch is closed while the client is waiting for the result of the analysis, the test is cancelled. Some clients are configured to close their connection if no response is received within a certain timeout. An analysis takes a long time to complete so you might need to relax any such client-side timeouts. On cancellation the analysis attempts to clean up the data it was writing, but it may not be able to remove it all. The path to the leftover data is recorded in the Elasticsearch logs. You should verify that this location has been cleaned up correctly. If there is still leftover data at the specified location, you should manually remove it. +```ts +client.snapshot.getRepository({ ... }) +``` -If the analysis is successful then it detected no incorrect behaviour, but this does not mean that correct behaviour is guaranteed. The analysis attempts to detect common bugs but it does not offer 100% coverage. Additionally, it does not test the following: +### Arguments [_arguments_snapshot.get_repository] + +#### Request (object) [_request_snapshot.get_repository] +- **`repository` (Optional, string | string[])**: A list of snapshot repository names used to limit the request. +Wildcard (`*`) expressions are supported including combining wildcards with exclude patterns starting with `-`. + +To get information about all snapshot repositories registered in the cluster, omit this parameter or use `*` or `_all`. +- **`local` (Optional, boolean)**: If `true`, the request gets information from the local node only. +If `false`, the request gets information from the master node. +- **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for the master node. +If the master node is not available before the timeout expires, the request fails and returns an error. +To indicate that the request should never timeout, set it to `-1`. + +## client.snapshot.repositoryAnalyze [_snapshot.repository_analyze] +Analyze a snapshot repository. +Analyze the performance characteristics and any incorrect behaviour found in a repository. + +The response exposes implementation details of the analysis which may change from version to version. +The response body format is therefore not considered stable and may be different in newer versions. + +There are a large number of third-party storage systems available, not all of which are suitable for use as a snapshot repository by Elasticsearch. +Some storage systems behave incorrectly, or perform poorly, especially when accessed concurrently by multiple clients as the nodes of an Elasticsearch cluster do. This API performs a collection of read and write operations on your repository which are designed to detect incorrect behaviour and to measure the performance characteristics of your storage system. + +The default values for the parameters are deliberately low to reduce the impact of running an analysis inadvertently and to provide a sensible starting point for your investigations. +Run your first analysis with the default parameter values to check for simple problems. +If successful, run a sequence of increasingly large analyses until you encounter a failure or you reach a `blob_count` of at least `2000`, a `max_blob_size` of at least `2gb`, a `max_total_data_size` of at least `1tb`, and a `register_operation_count` of at least `100`. 
+Always specify a generous timeout, possibly `1h` or longer, to allow time for each analysis to run to completion. +Perform the analyses using a multi-node cluster of a similar size to your production cluster so that it can detect any problems that only arise when the repository is accessed by many nodes at once. + +If the analysis fails, Elasticsearch detected that your repository behaved unexpectedly. +This usually means you are using a third-party storage system with an incorrect or incompatible implementation of the API it claims to support. +If so, this storage system is not suitable for use as a snapshot repository. +You will need to work with the supplier of your storage system to address the incompatibilities that Elasticsearch detects. + +If the analysis is successful, the API returns details of the testing process, optionally including how long each operation took. +You can use this information to determine the performance of your storage system. +If any operation fails or returns an incorrect result, the API returns an error. +If the API returns an error, it may not have removed all the data it wrote to the repository. +The error will indicate the location of any leftover data and this path is also recorded in the Elasticsearch logs. +You should verify that this location has been cleaned up correctly. +If there is still leftover data at the specified location, you should manually remove it. + +If the connection from your client to Elasticsearch is closed while the client is waiting for the result of the analysis, the test is cancelled. +Some clients are configured to close their connection if no response is received within a certain timeout. +An analysis takes a long time to complete so you might need to relax any such client-side timeouts. +On cancellation the analysis attempts to clean up the data it was writing, but it may not be able to remove it all. +The path to the leftover data is recorded in the Elasticsearch logs. +You should verify that this location has been cleaned up correctly. +If there is still leftover data at the specified location, you should manually remove it. + +If the analysis is successful then it detected no incorrect behaviour, but this does not mean that correct behaviour is guaranteed. +The analysis attempts to detect common bugs but it does not offer 100% coverage. +Additionally, it does not test the following: * Your repository must perform durable writes. Once a blob has been written it must remain in place until it is deleted, even after a power loss or similar disaster. * Your repository must not suffer from silent data corruption. Once a blob has been written, its contents must remain unchanged until it is deliberately modified or deleted. * Your repository must behave correctly even if connectivity from the cluster is disrupted. Reads and writes may fail in this case, but they must not return incorrect results. -::::{important} -An analysis writes a substantial amount of data to your repository and then reads it back again. This consumes bandwidth on the network between the cluster and the repository, and storage space and I/O bandwidth on the repository itself. You must ensure this load does not affect other users of these systems. Analyses respect the repository settings `max_snapshot_bytes_per_sec` and `max_restore_bytes_per_sec` if available and the cluster setting `indices.recovery.max_bytes_per_sec` which you can use to limit the bandwidth they consume. -:::: - - -::::{note} -This API is intended for exploratory use by humans. 
You should expect the request parameters and the response format to vary in future versions. -:::: - +IMPORTANT: An analysis writes a substantial amount of data to your repository and then reads it back again. +This consumes bandwidth on the network between the cluster and the repository, and storage space and I/O bandwidth on the repository itself. +You must ensure this load does not affect other users of these systems. +Analyses respect the repository settings `max_snapshot_bytes_per_sec` and `max_restore_bytes_per_sec` if available and the cluster setting `indices.recovery.max_bytes_per_sec` which you can use to limit the bandwidth they consume. -::::{note} -Different versions of Elasticsearch may perform different checks for repository compatibility, with newer versions typically being stricter than older ones. A storage system that passes repository analysis with one version of Elasticsearch may fail with a different version. This indicates it behaves incorrectly in ways that the former version did not detect. You must work with the supplier of your storage system to address the incompatibilities detected by the repository analysis API in any version of Elasticsearch. -:::: +NOTE: This API is intended for exploratory use by humans. You should expect the request parameters and the response format to vary in future versions. +NOTE: Different versions of Elasticsearch may perform different checks for repository compatibility, with newer versions typically being stricter than older ones. +A storage system that passes repository analysis with one version of Elasticsearch may fail with a different version. +This indicates it behaves incorrectly in ways that the former version did not detect. +You must work with the supplier of your storage system to address the incompatibilities detected by the repository analysis API in any version of Elasticsearch. -::::{note} -This API may not work correctly in a mixed-version cluster. -:::: +NOTE: This API may not work correctly in a mixed-version cluster. +*Implementation details* -**Implementation details** +NOTE: This section of documentation describes how the repository analysis API works in this version of Elasticsearch, but you should expect the implementation to vary between versions. The request parameters and response format depend on details of the implementation so may also be different in newer versions. -::::{note} -This section of documentation describes how the repository analysis API works in this version of Elasticsearch, but you should expect the implementation to vary between versions. The request parameters and response format depend on details of the implementation so may also be different in newer versions. -:::: +The analysis comprises a number of blob-level tasks, as set by the `blob_count` parameter and a number of compare-and-exchange operations on linearizable registers, as set by the `register_operation_count` parameter. +These tasks are distributed over the data and master-eligible nodes in the cluster for execution. +For most blob-level tasks, the executing node first writes a blob to the repository and then instructs some of the other nodes in the cluster to attempt to read the data it just wrote. +The size of the blob is chosen randomly, according to the `max_blob_size` and `max_total_data_size` parameters. +If any of these reads fails then the repository does not implement the necessary read-after-write semantics that Elasticsearch requires. 
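As a hedged illustration of the escalation strategy described earlier (start with the defaults, then work up to larger runs), an analysis call using the suggested minimum sizes might look like the sketch below; the repository name is a placeholder and the parameter values simply mirror the documented starting points:

```ts
// Sketch only: `client` is an existing Client instance and 'my_repository'
// must already be registered as a snapshot repository.
const report = await client.snapshot.repositoryAnalyze({
  repository: 'my_repository',
  blob_count: 2000,            // at least 2000 for realistic experiments
  max_blob_size: '2gb',        // at least 2gb
  max_total_data_size: '1tb',  // at least 1tb
  register_operation_count: 100,
  timeout: '2h',               // generous timeout, as recommended
  detailed: true               // include per-operation timing information
})
console.log(report)
```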
-The analysis comprises a number of blob-level tasks, as set by the `blob_count` parameter and a number of compare-and-exchange operations on linearizable registers, as set by the `register_operation_count` parameter. These tasks are distributed over the data and master-eligible nodes in the cluster for execution. +For some blob-level tasks, the executing node will instruct some of its peers to attempt to read the data before the writing process completes. +These reads are permitted to fail, but must not return partial data. +If any read returns partial data then the repository does not implement the necessary atomicity semantics that Elasticsearch requires. -For most blob-level tasks, the executing node first writes a blob to the repository and then instructs some of the other nodes in the cluster to attempt to read the data it just wrote. The size of the blob is chosen randomly, according to the `max_blob_size` and `max_total_data_size` parameters. If any of these reads fails then the repository does not implement the necessary read-after-write semantics that Elasticsearch requires. +For some blob-level tasks, the executing node will overwrite the blob while its peers are reading it. +In this case the data read may come from either the original or the overwritten blob, but the read operation must not return partial data or a mix of data from the two blobs. +If any of these reads returns partial data or a mix of the two blobs then the repository does not implement the necessary atomicity semantics that Elasticsearch requires for overwrites. -For some blob-level tasks, the executing node will instruct some of its peers to attempt to read the data before the writing process completes. These reads are permitted to fail, but must not return partial data. If any read returns partial data then the repository does not implement the necessary atomicity semantics that Elasticsearch requires. +The executing node will use a variety of different methods to write the blob. +For instance, where applicable, it will use both single-part and multi-part uploads. +Similarly, the reading nodes will use a variety of different methods to read the data back again. +For instance they may read the entire blob from start to end or may read only a subset of the data. -For some blob-level tasks, the executing node will overwrite the blob while its peers are reading it. In this case the data read may come from either the original or the overwritten blob, but the read operation must not return partial data or a mix of data from the two blobs. If any of these reads returns partial data or a mix of the two blobs then the repository does not implement the necessary atomicity semantics that Elasticsearch requires for overwrites. +For some blob-level tasks, the executing node will cancel the write before it is complete. +In this case, it still instructs some of the other nodes in the cluster to attempt to read the blob but all of these reads must fail to find the blob. -The executing node will use a variety of different methods to write the blob. For instance, where applicable, it will use both single-part and multi-part uploads. Similarly, the reading nodes will use a variety of different methods to read the data back again. For instance they may read the entire blob from start to end or may read only a subset of the data. +Linearizable registers are special blobs that Elasticsearch manipulates using an atomic compare-and-exchange operation. 
+This operation ensures correct and strongly-consistent behavior even when the blob is accessed by multiple nodes at the same time. +The detailed implementation of the compare-and-exchange operation on linearizable registers varies by repository type. +Repository analysis verifies that uncontended compare-and-exchange operations on a linearizable register blob always succeed. +Repository analysis also verifies that contended operations either succeed or report the contention but do not return incorrect results. +If an operation fails due to contention, Elasticsearch retries the operation until it succeeds. +Most of the compare-and-exchange operations performed by repository analysis atomically increment a counter which is represented as an 8-byte blob. +Some operations also verify the behavior on small blobs with sizes other than 8 bytes. -For some blob-level tasks, the executing node will cancel the write before it is complete. In this case, it still instructs some of the other nodes in the cluster to attempt to read the blob but all of these reads must fail to find the blob. - -Linearizable registers are special blobs that Elasticsearch manipulates using an atomic compare-and-exchange operation. This operation ensures correct and strongly-consistent behavior even when the blob is accessed by multiple nodes at the same time. The detailed implementation of the compare-and-exchange operation on linearizable registers varies by repository type. Repository analysis verifies that that uncontended compare-and-exchange operations on a linearizable register blob always succeed. Repository analysis also verifies that contended operations either succeed or report the contention but do not return incorrect results. If an operation fails due to contention, Elasticsearch retries the operation until it succeeds. Most of the compare-and-exchange operations performed by repository analysis atomically increment a counter which is represented as an 8-byte blob. Some operations also verify the behavior on small blobs with sizes other than 8 bytes. - -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-repository-analyze) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-snapshot-repository-analyze) ```ts client.snapshot.repositoryAnalyze({ repository }) ``` - -### Arguments [_arguments_438] - -* **Request (object):** - - * **`repository` (string)**: The name of the repository. - * **`blob_count` (Optional, number)**: The total number of blobs to write to the repository during the test. For realistic experiments, you should set it to at least `2000`. - * **`concurrency` (Optional, number)**: The number of operations to run concurrently during the test. - * **`detailed` (Optional, boolean)**: Indicates whether to return detailed results, including timing information for every operation performed during the analysis. If false, it returns only a summary of the analysis. - * **`early_read_node_count` (Optional, number)**: The number of nodes on which to perform an early read operation while writing each blob. Early read operations are only rarely performed. - * **`max_blob_size` (Optional, number | string)**: The maximum size of a blob to be written during the test. For realistic experiments, you should set it to at least `2gb`. - * **`max_total_data_size` (Optional, number | string)**: An upper limit on the total size of all the blobs written during the test.
For realistic experiments, you should set it to at least `1tb`. - * **`rare_action_probability` (Optional, number)**: The probability of performing a rare action such as an early read, an overwrite, or an aborted write on each blob. - * **`rarely_abort_writes` (Optional, boolean)**: Indicates whether to rarely cancel writes before they complete. - * **`read_node_count` (Optional, number)**: The number of nodes on which to read a blob after writing. - * **`register_operation_count` (Optional, number)**: The minimum number of linearizable register operations to perform in total. For realistic experiments, you should set it to at least `100`. - * **`seed` (Optional, number)**: The seed for the pseudo-random number generator used to generate the list of operations performed during the test. To repeat the same set of operations in multiple experiments, use the same seed in each experiment. Note that the operations are performed concurrently so might not always happen in the same order on each run. - * **`timeout` (Optional, string | -1 | 0)**: The period of time to wait for the test to complete. If no response is received before the timeout expires, the test is cancelled and returns an error. - - - -### restore [_restore] - -Restore a snapshot. Restore a snapshot of a cluster or data streams and indices. - -You can restore a snapshot only to a running cluster with an elected master node. The snapshot repository must be registered and available to the cluster. The snapshot and cluster versions must be compatible. - -To restore a snapshot, the cluster’s global metadata must be writable. Ensure there are’t any cluster blocks that prevent writes. The restore operation ignores index blocks. +### Arguments [_arguments_snapshot.repository_analyze] + +#### Request (object) [_request_snapshot.repository_analyze] +- **`repository` (string)**: The name of the repository. +- **`blob_count` (Optional, number)**: The total number of blobs to write to the repository during the test. +For realistic experiments, you should set it to at least `2000`. +- **`concurrency` (Optional, number)**: The number of operations to run concurrently during the test. +- **`detailed` (Optional, boolean)**: Indicates whether to return detailed results, including timing information for every operation performed during the analysis. +If false, it returns only a summary of the analysis. +- **`early_read_node_count` (Optional, number)**: The number of nodes on which to perform an early read operation while writing each blob. +Early read operations are only rarely performed. +- **`max_blob_size` (Optional, number | string)**: The maximum size of a blob to be written during the test. +For realistic experiments, you should set it to at least `2gb`. +- **`max_total_data_size` (Optional, number | string)**: An upper limit on the total size of all the blobs written during the test. +For realistic experiments, you should set it to at least `1tb`. +- **`rare_action_probability` (Optional, number)**: The probability of performing a rare action such as an early read, an overwrite, or an aborted write on each blob. +- **`rarely_abort_writes` (Optional, boolean)**: Indicates whether to rarely cancel writes before they complete. +- **`read_node_count` (Optional, number)**: The number of nodes on which to read a blob after writing. +- **`register_operation_count` (Optional, number)**: The minimum number of linearizable register operations to perform in total. +For realistic experiments, you should set it to at least `100`. 
+- **`seed` (Optional, number)**: The seed for the pseudo-random number generator used to generate the list of operations performed during the test. +To repeat the same set of operations in multiple experiments, use the same seed in each experiment. +Note that the operations are performed concurrently so might not always happen in the same order on each run. +- **`timeout` (Optional, string | -1 | 0)**: The period of time to wait for the test to complete. +If no response is received before the timeout expires, the test is cancelled and returns an error. + +## client.snapshot.restore [_snapshot.restore] +Restore a snapshot. +Restore a snapshot of a cluster or data streams and indices. + +You can restore a snapshot only to a running cluster with an elected master node. +The snapshot repository must be registered and available to the cluster. +The snapshot and cluster versions must be compatible. + +To restore a snapshot, the cluster's global metadata must be writable. Ensure there aren't any cluster blocks that prevent writes. The restore operation ignores index blocks. Before you restore a data stream, ensure the cluster contains a matching index template with data streams enabled. To check, use the index management feature in Kibana or the get index template API: @@ -12867,28 +13874,36 @@ Before you restore a data stream, ensure the cluster contains a matching index t GET _index_template/*?filter_path=index_templates.name,index_templates.index_template.index_patterns,index_templates.index_template.data_stream ``` -If no such template exists, you can create one or restore a cluster state that contains one. Without a matching index template, a data stream can’t roll over or create backing indices. +If no such template exists, you can create one or restore a cluster state that contains one. Without a matching index template, a data stream can't roll over or create backing indices. If your snapshot contains data from App Search or Workplace Search, you must restore the Enterprise Search encryption key before you restore the snapshot. -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-restore) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-snapshot-restore) ```ts client.snapshot.restore({ repository, snapshot }) ``` +### Arguments [_arguments_snapshot.restore] -### Arguments [_arguments_439] - -* **Request (object):** +#### Request (object) [_request_snapshot.restore] +- **`repository` (string)**: The name of the repository to restore a snapshot from. +- **`snapshot` (string)**: The name of the snapshot to restore. +- **`feature_states` (Optional, string[])**: The feature states to restore. +If `include_global_state` is `true`, the request restores all feature states in the snapshot by default. +If `include_global_state` is `false`, the request restores no feature states by default. +Note that specifying an empty array will result in the default behavior. +To restore no feature states, regardless of the `include_global_state` value, specify an array containing only the value `none` (`["none"]`). +- **`ignore_index_settings` (Optional, string[])**: The index settings to not restore from the snapshot. +You can't use this option to ignore `index.number_of_shards`. - * **`repository` (string)**: The name of the repository to restore a snapshot from. - * **`snapshot` (string)**: The name of the snapshot to restore. - * **`feature_states` (Optional, string[])**: The feature states to restore.
If `include_global_state` is `true`, the request restores all feature states in the snapshot by default. If `include_global_state` is `false`, the request restores no feature states by default. Note that specifying an empty array will result in the default behavior. To restore no feature states, regardless of the `include_global_state` value, specify an array containing only the value `none` (`["none"]`). - * **`ignore_index_settings` (Optional, string[])**: The index settings to not restore from the snapshot. You can’t use this option to ignore `index.number_of_shards`. - - -For data streams, this option applies only to restored backing indices. New backing indices are configured using the data stream’s matching index template. ** *`ignore_unavailable` (Optional, boolean)**: If `true`, the request ignores any index or data stream in indices that’s missing from the snapshot. If `false`, the request returns an error for any missing index or data stream. *** *`include_aliases` (Optional, boolean)**: If `true`, the request restores aliases for any restored data streams and indices. If `false`, the request doesn’t restore aliases. ** *`include_global_state` (Optional, boolean)**: If `true`, restore the cluster state. The cluster state includes: +For data streams, this option applies only to restored backing indices. +New backing indices are configured using the data stream's matching index template. +- **`ignore_unavailable` (Optional, boolean)**: If `true`, the request ignores any index or data stream in indices that's missing from the snapshot. +If `false`, the request returns an error for any missing index or data stream. +- **`include_aliases` (Optional, boolean)**: If `true`, the request restores aliases for any restored data streams and indices. +If `false`, the request doesn’t restore aliases. +- **`include_global_state` (Optional, boolean)**: If `true`, restore the cluster state. The cluster state includes: * Persistent cluster settings * Index templates @@ -12898,492 +13913,470 @@ For data streams, this option applies only to restored backing indices. New back * Stored scripts * For snapshots taken after 7.12.0, feature states -If `include_global_state` is `true`, the restore operation merges the legacy index templates in your cluster with the templates contained in the snapshot, replacing any existing ones whose name matches one in the snapshot. It completely removes all persistent settings, non-legacy index templates, ingest pipelines, and ILM lifecycle policies that exist in your cluster and replaces them with the corresponding items from the snapshot. +If `include_global_state` is `true`, the restore operation merges the legacy index templates in your cluster with the templates contained in the snapshot, replacing any existing ones whose name matches one in the snapshot. +It completely removes all persistent settings, non-legacy index templates, ingest pipelines, and ILM lifecycle policies that exist in your cluster and replaces them with the corresponding items from the snapshot. Use the `feature_states` parameter to configure how feature states are restored. -If `include_global_state` is `true` and a snapshot was created without a global state then the restore request will fail. 
*** *`index_settings` (Optional, { index, mode, routing_path, soft_deletes, sort, number_of_shards, number_of_replicas, number_of_routing_shards, check_on_startup, codec, routing_partition_size, load_fixed_bitset_filters_eagerly, hidden, auto_expand_replicas, merge, search, refresh_interval, max_result_window, max_inner_result_window, max_rescore_window, max_docvalue_fields_search, max_script_fields, max_ngram_diff, max_shingle_diff, blocks, max_refresh_listeners, analyze, highlight, max_terms_count, max_regex_length, routing, gc_deletes, default_pipeline, final_pipeline, lifecycle, provided_name, creation_date, creation_date_string, uuid, version, verified_before_close, format, max_slices_per_scroll, translog, query_string, priority, top_metrics_max_size, analysis, settings, time_series, queries, similarity, mapping, indexing.slowlog, indexing_pressure, store })**: Index settings to add or change in restored indices, including backing indices. You can’t use this option to change `index.number_of_shards`. - -For data streams, this option applies only to restored backing indices. New backing indices are configured using the data stream’s matching index template. *** *`indices` (Optional, string | string[])**: A list of indices and data streams to restore. It supports a multi-target syntax. The default behavior is all regular indices and regular data streams in the snapshot. - -You can’t use this parameter to restore system indices or system data streams. Use `feature_states` instead. *** *`partial` (Optional, boolean)**: If `false`, the entire restore operation will fail if one or more indices included in the snapshot do not have all primary shards available. - -If true, it allows restoring a partial snapshot of indices with unavailable shards. Only shards that were successfully included in the snapshot will be restored. All missing shards will be recreated as empty. *** *`rename_pattern` (Optional, string)**: A rename pattern to apply to restored data streams and indices. Data streams and indices matching the rename pattern will be renamed according to `rename_replacement`. - -The rename pattern is applied as defined by the regular expression that supports referencing the original text, according to the `appendReplacement` logic. ** *`rename_replacement` (Optional, string)**: The rename replacement string that is used with the `rename_pattern`. *** *`master_timeout` (Optional, string | -1 | 0)**: The period to wait for the master node. If the master node is not available before the timeout expires, the request fails and returns an error. To indicate that the request should never timeout, set it to `-1`. ** *`wait_for_completion` (Optional, boolean)**: If `true`, the request returns a response when the restore operation completes. The operation is complete when it finishes all attempts to recover primary shards for restored indices. This applies even if one or more of the recovery attempts fail. +If `include_global_state` is `true` and a snapshot was created without a global state then the restore request will fail. 
+- **`index_settings` (Optional, { index, mode, routing_path, soft_deletes, sort, number_of_shards, number_of_replicas, number_of_routing_shards, check_on_startup, codec, routing_partition_size, load_fixed_bitset_filters_eagerly, hidden, auto_expand_replicas, merge, search, refresh_interval, max_result_window, max_inner_result_window, max_rescore_window, max_docvalue_fields_search, max_script_fields, max_ngram_diff, max_shingle_diff, blocks, max_refresh_listeners, analyze, highlight, max_terms_count, max_regex_length, routing, gc_deletes, default_pipeline, final_pipeline, lifecycle, provided_name, creation_date, creation_date_string, uuid, version, verified_before_close, format, max_slices_per_scroll, translog, query_string, priority, top_metrics_max_size, analysis, settings, time_series, queries, similarity, mapping, indexing.slowlog, indexing_pressure, store })**: Index settings to add or change in restored indices, including backing indices. +You can't use this option to change `index.number_of_shards`. + +For data streams, this option applies only to restored backing indices. +New backing indices are configured using the data stream's matching index template. +- **`indices` (Optional, string | string[])**: A list of indices and data streams to restore. +It supports a multi-target syntax. +The default behavior is all regular indices and regular data streams in the snapshot. + +You can't use this parameter to restore system indices or system data streams. +Use `feature_states` instead. +- **`partial` (Optional, boolean)**: If `false`, the entire restore operation will fail if one or more indices included in the snapshot do not have all primary shards available. + +If true, it allows restoring a partial snapshot of indices with unavailable shards. +Only shards that were successfully included in the snapshot will be restored. +All missing shards will be recreated as empty. +- **`rename_pattern` (Optional, string)**: A rename pattern to apply to restored data streams and indices. +Data streams and indices matching the rename pattern will be renamed according to `rename_replacement`. + +The rename pattern is applied as defined by the regular expression that supports referencing the original text, according to the `appendReplacement` logic. +- **`rename_replacement` (Optional, string)**: The rename replacement string that is used with the `rename_pattern`. +- **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for the master node. +If the master node is not available before the timeout expires, the request fails and returns an error. +To indicate that the request should never timeout, set it to `-1`. +- **`wait_for_completion` (Optional, boolean)**: If `true`, the request returns a response when the restore operation completes. +The operation is complete when it finishes all attempts to recover primary shards for restored indices. +This applies even if one or more of the recovery attempts fail. If `false`, the request returns a response when the restore operation initializes. +## client.snapshot.status [_snapshot.status] +Get the snapshot status. +Get a detailed description of the current state for each shard participating in the snapshot. -### status [_status_2] - -Get the snapshot status. Get a detailed description of the current state for each shard participating in the snapshot. - -Note that this API should be used only to obtain detailed shard-level information for ongoing snapshots. 
If this detail is not needed or you want to obtain information about one or more existing snapshots, use the get snapshot API.
+Note that this API should be used only to obtain detailed shard-level information for ongoing snapshots.
+If this detail is not needed or you want to obtain information about one or more existing snapshots, use the get snapshot API.

-If you omit the `<snapshot>` request path parameter, the request retrieves information only for currently running snapshots. This usage is preferred. If needed, you can specify `<repository>` and `<snapshot>` to retrieve information for specific snapshots, even if they’re not currently running.
+If you omit the `<snapshot>` request path parameter, the request retrieves information only for currently running snapshots.
+This usage is preferred.
+If needed, you can specify `<repository>` and `<snapshot>` to retrieve information for specific snapshots, even if they're not currently running.

-::::{warning}
-Using the API to return the status of any snapshots other than currently running snapshots can be expensive. The API requires a read from the repository for each shard in each snapshot. For example, if you have 100 snapshots with 1,000 shards each, an API request that includes all snapshots will require 100,000 reads (100 snapshots x 1,000 shards).
-::::
+WARNING: Using the API to return the status of any snapshots other than currently running snapshots can be expensive.
+The API requires a read from the repository for each shard in each snapshot.
+For example, if you have 100 snapshots with 1,000 shards each, an API request that includes all snapshots will require 100,000 reads (100 snapshots x 1,000 shards).
+Depending on the latency of your storage, such requests can take an extremely long time to return results.
+These requests can also tax machine resources and, when using cloud storage, incur high processing costs.

-Depending on the latency of your storage, such requests can take an extremely long time to return results. These requests can also tax machine resources and, when using cloud storage, incur high processing costs.
-
-[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-status)
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-snapshot-status)

```ts
client.snapshot.status({ ... })
```
+### Arguments [_arguments_snapshot.status]

-### Arguments [_arguments_440]
-
-* **Request (object):**
-
- * **`repository` (Optional, string)**: The snapshot repository name used to limit the request. It supports wildcards (`*`) if `<snapshot>` isn’t specified.
- * **`snapshot` (Optional, string | string[])**: A list of snapshots to retrieve status for. The default is currently running snapshots. Wildcards (`*`) are not supported.
- * **`ignore_unavailable` (Optional, boolean)**: If `false`, the request returns an error for any snapshots that are unavailable. If `true`, the request ignores snapshots that are unavailable, such as those that are corrupted or temporarily cannot be returned.
- * **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for the master node. If the master node is not available before the timeout expires, the request fails and returns an error. To indicate that the request should never timeout, set it to `-1`.
-
+#### Request (object) [_request_snapshot.status]
+- **`repository` (Optional, string)**: The snapshot repository name used to limit the request.
+It supports wildcards (`*`) if `<snapshot>` isn't specified.
+- **`snapshot` (Optional, string | string[])**: A list of snapshots to retrieve status for. +The default is currently running snapshots. +Wildcards (`*`) are not supported. +- **`ignore_unavailable` (Optional, boolean)**: If `false`, the request returns an error for any snapshots that are unavailable. +If `true`, the request ignores snapshots that are unavailable, such as those that are corrupted or temporarily cannot be returned. +- **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for the master node. +If the master node is not available before the timeout expires, the request fails and returns an error. +To indicate that the request should never timeout, set it to `-1`. +## client.snapshot.verifyRepository [_snapshot.verify_repository] +Verify a snapshot repository. +Check for common misconfigurations in a snapshot repository. -### verify_repository [_verify_repository] - -Verify a snapshot repository. Check for common misconfigurations in a snapshot repository. - -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-verify-repository) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-snapshot-verify-repository) ```ts client.snapshot.verifyRepository({ repository }) ``` +### Arguments [_arguments_snapshot.verify_repository] -### Arguments [_arguments_441] - -* **Request (object):** - - * **`repository` (string)**: The name of the snapshot repository to verify. - * **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for the master node. If the master node is not available before the timeout expires, the request fails and returns an error. To indicate that the request should never timeout, set it to `-1`. - * **`timeout` (Optional, string | -1 | 0)**: The period to wait for a response from all relevant nodes in the cluster after updating the cluster metadata. If no response is received before the timeout expires, the cluster metadata update still applies but the response will indicate that it was not completely acknowledged. To indicate that the request should never timeout, set it to `-1`. - - - -## sql [_sql] - - -### clear_cursor [_clear_cursor] +#### Request (object) [_request_snapshot.verify_repository] +- **`repository` (string)**: The name of the snapshot repository to verify. +- **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for the master node. +If the master node is not available before the timeout expires, the request fails and returns an error. +To indicate that the request should never timeout, set it to `-1`. +- **`timeout` (Optional, string | -1 | 0)**: The period to wait for a response from all relevant nodes in the cluster after updating the cluster metadata. +If no response is received before the timeout expires, the cluster metadata update still applies but the response will indicate that it was not completely acknowledged. +To indicate that the request should never timeout, set it to `-1`. +## client.sql.clearCursor [_sql.clear_cursor] Clear an SQL search cursor. -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-sql-clear-cursor) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-sql-clear-cursor) ```ts client.sql.clearCursor({ cursor }) ``` +### Arguments [_arguments_sql.clear_cursor] -### Arguments [_arguments_442] - -* **Request (object):** - - * **`cursor` (string)**: Cursor to clear. 
- +#### Request (object) [_request_sql.clear_cursor] +- **`cursor` (string)**: Cursor to clear. - -### delete_async [_delete_async] - -Delete an async SQL search. Delete an async SQL search or a stored synchronous SQL search. If the search is still running, the API cancels it. +## client.sql.deleteAsync [_sql.delete_async] +Delete an async SQL search. +Delete an async SQL search or a stored synchronous SQL search. +If the search is still running, the API cancels it. If the Elasticsearch security features are enabled, only the following users can use this API to delete a search: * Users with the `cancel_task` cluster privilege. * The user who first submitted the search. -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-sql-delete-async) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-sql-delete-async) ```ts client.sql.deleteAsync({ id }) ``` +### Arguments [_arguments_sql.delete_async] -### Arguments [_arguments_443] - -* **Request (object):** - - * **`id` (string)**: The identifier for the search. +#### Request (object) [_request_sql.delete_async] +- **`id` (string)**: The identifier for the search. - - -### get_async [_get_async] - -Get async SQL search results. Get the current status and available results for an async SQL search or stored synchronous SQL search. +## client.sql.getAsync [_sql.get_async] +Get async SQL search results. +Get the current status and available results for an async SQL search or stored synchronous SQL search. If the Elasticsearch security features are enabled, only the user who first submitted the SQL search can retrieve the search using this API. -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-sql-get-async) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-sql-get-async) ```ts client.sql.getAsync({ id }) ``` +### Arguments [_arguments_sql.get_async] -### Arguments [_arguments_444] - -* **Request (object):** - - * **`id` (string)**: The identifier for the search. - * **`delimiter` (Optional, string)**: The separator for CSV results. The API supports this parameter only for CSV responses. - * **`format` (Optional, string)**: The format for the response. You must specify a format using this parameter or the `Accept` HTTP header. If you specify both, the API uses this parameter. - * **`keep_alive` (Optional, string | -1 | 0)**: The retention period for the search and its results. It defaults to the `keep_alive` period for the original SQL search. - * **`wait_for_completion_timeout` (Optional, string | -1 | 0)**: The period to wait for complete results. It defaults to no timeout, meaning the request waits for complete search results. - - - -### get_async_status [_get_async_status] - -Get the async SQL search status. Get the current status of an async SQL search or a stored synchronous SQL search. - -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-sql-get-async-status) - -```ts -client.sql.getAsyncStatus({ id }) -``` - - -### Arguments [_arguments_445] - -* **Request (object):** - - * **`id` (string)**: The identifier for the search. - - - -### query [_query_2] - -Get SQL search results. Run an SQL request. - -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-sql-query) - -```ts -client.sql.query({ ... 
}) -``` - - -### Arguments [_arguments_446] - -* **Request (object):** - - * **`allow_partial_search_results` (Optional, boolean)**: If `true`, the response has partial results when there are shard request timeouts or shard failures. If `false`, the API returns an error with no partial results. - * **`catalog` (Optional, string)**: The default catalog (cluster) for queries. If unspecified, the queries execute on the data in the local cluster only. - * **`columnar` (Optional, boolean)**: If `true`, the results are in a columnar fashion: one row represents all the values of a certain column from the current page of results. The API supports this parameter only for CBOR, JSON, SMILE, and YAML responses. - * **`cursor` (Optional, string)**: The cursor used to retrieve a set of paginated results. If you specify a cursor, the API only uses the `columnar` and `time_zone` request body parameters. It ignores other request body parameters. - * **`fetch_size` (Optional, number)**: The maximum number of rows (or entries) to return in one response. - * **`field_multi_value_leniency` (Optional, boolean)**: If `false`, the API returns an exception when encountering multiple values for a field. If `true`, the API is lenient and returns the first value from the array with no guarantee of consistent results. - * **`filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })**: The Elasticsearch query DSL for additional filtering. - * **`index_using_frozen` (Optional, boolean)**: If `true`, the search can run on frozen indices. - * **`keep_alive` (Optional, string | -1 | 0)**: The retention period for an async or saved synchronous search. - * **`keep_on_completion` (Optional, boolean)**: If `true`, Elasticsearch stores synchronous searches if you also specify the `wait_for_completion_timeout` parameter. If `false`, Elasticsearch only stores async searches that don’t finish before the `wait_for_completion_timeout`. - * **`page_timeout` (Optional, string | -1 | 0)**: The minimum retention period for the scroll cursor. After this time period, a pagination request might fail because the scroll cursor is no longer available. Subsequent scroll requests prolong the lifetime of the scroll cursor by the duration of `page_timeout` in the scroll request. - * **`params` (Optional, Record)**: The values for parameters in the query. - * **`query` (Optional, string)**: The SQL query to run. - * **`request_timeout` (Optional, string | -1 | 0)**: The timeout before the request fails. - * **`runtime_mappings` (Optional, Record)**: One or more runtime fields for the search request. These fields take precedence over mapped fields with the same name. - * **`time_zone` (Optional, string)**: The ISO-8601 time zone ID for the search. - * **`wait_for_completion_timeout` (Optional, string | -1 | 0)**: The period to wait for complete results. 
It defaults to no timeout, meaning the request waits for complete search results. If the search doesn’t finish within this period, the search becomes async. - +#### Request (object) [_request_sql.get_async] +- **`id` (string)**: The identifier for the search. +- **`delimiter` (Optional, string)**: The separator for CSV results. +The API supports this parameter only for CSV responses. +- **`format` (Optional, string)**: The format for the response. +You must specify a format using this parameter or the `Accept` HTTP header. +If you specify both, the API uses this parameter. +- **`keep_alive` (Optional, string | -1 | 0)**: The retention period for the search and its results. +It defaults to the `keep_alive` period for the original SQL search. +- **`wait_for_completion_timeout` (Optional, string | -1 | 0)**: The period to wait for complete results. +It defaults to no timeout, meaning the request waits for complete search results. -To save a synchronous search, you must specify this parameter and the `keep_on_completion` parameter. *** *`format` (Optional, Enum("csv" | "json" | "tsv" | "txt" | "yaml" | "cbor" | "smile"))**: The format for the response. You can also specify a format using the `Accept` HTTP header. If you specify both this parameter and the `Accept` HTTP header, this parameter takes precedence. +## client.sql.getAsyncStatus [_sql.get_async_status] +Get the async SQL search status. +Get the current status of an async SQL search or a stored synchronous SQL search. - -### translate [_translate] - -Translate SQL into Elasticsearch queries. Translate an SQL search into a search API request containing Query DSL. It accepts the same request body parameters as the SQL search API, excluding `cursor`. - -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-sql-translate) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-sql-get-async-status) ```ts -client.sql.translate({ query }) +client.sql.getAsyncStatus({ id }) ``` +### Arguments [_arguments_sql.get_async_status] -### Arguments [_arguments_447] +#### Request (object) [_request_sql.get_async_status] +- **`id` (string)**: The identifier for the search. -* **Request (object):** +## client.sql.query [_sql.query] +Get SQL search results. +Run an SQL request. - * **`query` (string)**: The SQL query to run. - * **`fetch_size` (Optional, number)**: The maximum number of rows (or entries) to return in one response. - * **`filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })**: The Elasticsearch query DSL for additional filtering. - * **`time_zone` (Optional, string)**: The ISO-8601 time zone ID for the search. +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-sql-query) +```ts +client.sql.query({ ... 
}) +``` +### Arguments [_arguments_sql.query] + +#### Request (object) [_request_sql.query] +- **`allow_partial_search_results` (Optional, boolean)**: If `true`, the response has partial results when there are shard request timeouts or shard failures. +If `false`, the API returns an error with no partial results. +- **`catalog` (Optional, string)**: The default catalog (cluster) for queries. +If unspecified, the queries execute on the data in the local cluster only. +- **`columnar` (Optional, boolean)**: If `true`, the results are in a columnar fashion: one row represents all the values of a certain column from the current page of results. +The API supports this parameter only for CBOR, JSON, SMILE, and YAML responses. +- **`cursor` (Optional, string)**: The cursor used to retrieve a set of paginated results. +If you specify a cursor, the API only uses the `columnar` and `time_zone` request body parameters. +It ignores other request body parameters. +- **`fetch_size` (Optional, number)**: The maximum number of rows (or entries) to return in one response. +- **`field_multi_value_leniency` (Optional, boolean)**: If `false`, the API returns an exception when encountering multiple values for a field. +If `true`, the API is lenient and returns the first value from the array with no guarantee of consistent results. +- **`filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })**: The Elasticsearch query DSL for additional filtering. +- **`index_using_frozen` (Optional, boolean)**: If `true`, the search can run on frozen indices. +- **`keep_alive` (Optional, string | -1 | 0)**: The retention period for an async or saved synchronous search. +- **`keep_on_completion` (Optional, boolean)**: If `true`, Elasticsearch stores synchronous searches if you also specify the `wait_for_completion_timeout` parameter. +If `false`, Elasticsearch only stores async searches that don't finish before the `wait_for_completion_timeout`. +- **`page_timeout` (Optional, string | -1 | 0)**: The minimum retention period for the scroll cursor. +After this time period, a pagination request might fail because the scroll cursor is no longer available. +Subsequent scroll requests prolong the lifetime of the scroll cursor by the duration of `page_timeout` in the scroll request. +- **`params` (Optional, Record)**: The values for parameters in the query. +- **`query` (Optional, string)**: The SQL query to run. +- **`request_timeout` (Optional, string | -1 | 0)**: The timeout before the request fails. +- **`runtime_mappings` (Optional, Record)**: One or more runtime fields for the search request. +These fields take precedence over mapped fields with the same name. +- **`time_zone` (Optional, string)**: The ISO-8601 time zone ID for the search. +- **`wait_for_completion_timeout` (Optional, string | -1 | 0)**: The period to wait for complete results. 
+It defaults to no timeout, meaning the request waits for complete search results. +If the search doesn't finish within this period, the search becomes async. + +To save a synchronous search, you must specify this parameter and the `keep_on_completion` parameter. +- **`format` (Optional, Enum("csv" | "json" | "tsv" | "txt" | "yaml" | "cbor" | "smile"))**: The format for the response. +You can also specify a format using the `Accept` HTTP header. +If you specify both this parameter and the `Accept` HTTP header, this parameter takes precedence. + +## client.sql.translate [_sql.translate] +Translate SQL into Elasticsearch queries. +Translate an SQL search into a search API request containing Query DSL. +It accepts the same request body parameters as the SQL search API, excluding `cursor`. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-sql-translate) -## ssl [_ssl] +```ts +client.sql.translate({ query }) +``` +### Arguments [_arguments_sql.translate] -### certificates [_certificates] +#### Request (object) [_request_sql.translate] +- **`query` (string)**: The SQL query to run. +- **`fetch_size` (Optional, number)**: The maximum number of rows (or entries) to return in one response. +- **`filter` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })**: The Elasticsearch query DSL for additional filtering. +- **`time_zone` (Optional, string)**: The ISO-8601 time zone ID for the search. +## client.ssl.certificates [_ssl.certificates] Get SSL certificates. -Get information about the X.509 certificates that are used to encrypt communications in the cluster. The API returns a list that includes certificates from all TLS contexts including: +Get information about the X.509 certificates that are used to encrypt communications in the cluster. +The API returns a list that includes certificates from all TLS contexts including: -* Settings for transport and HTTP interfaces -* TLS settings that are used within authentication realms -* TLS settings for remote monitoring exporters +- Settings for transport and HTTP interfaces +- TLS settings that are used within authentication realms +- TLS settings for remote monitoring exporters -The list includes certificates that are used for configuring trust, such as those configured in the `xpack.security.transport.ssl.truststore` and `xpack.security.transport.ssl.certificate_authorities` settings. It also includes certificates that are used for configuring server identity, such as `xpack.security.http.ssl.keystore` and `xpack.security.http.ssl.certificate settings`. +The list includes certificates that are used for configuring trust, such as those configured in the `xpack.security.transport.ssl.truststore` and `xpack.security.transport.ssl.certificate_authorities` settings. 
+It also includes certificates that are used for configuring server identity, such as the `xpack.security.http.ssl.keystore` and `xpack.security.http.ssl.certificate` settings.

The list does not include certificates that are sourced from the default SSL context of the Java Runtime Environment (JRE), even if those certificates are in use within Elasticsearch.

-::::{note}
-When a PKCS#11 token is configured as the truststore of the JRE, the API returns all the certificates that are included in the PKCS#11 token irrespective of whether these are used in the Elasticsearch TLS configuration.
-::::
-
+NOTE: When a PKCS#11 token is configured as the truststore of the JRE, the API returns all the certificates that are included in the PKCS#11 token irrespective of whether these are used in the Elasticsearch TLS configuration.

If Elasticsearch is configured to use a keystore or truststore, the API output includes all certificates in that store, even though some of the certificates might not be in active use within the cluster.

-[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ssl-certificates)
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-ssl-certificates)

```ts
client.ssl.certificates()
```

-## synonyms [_synonyms]
-
-
-### delete_synonym [_delete_synonym]
-
+## client.synonyms.deleteSynonym [_synonyms.delete_synonym]
Delete a synonym set.

You can only delete a synonyms set that is not in use by any index analyzer.

-Synonyms sets can be used in synonym graph token filters and synonym token filters. These synonym filters can be used as part of search analyzers.
+Synonyms sets can be used in synonym graph token filters and synonym token filters.
+These synonym filters can be used as part of search analyzers.

-Analyzers need to be loaded when an index is restored (such as when a node starts, or the index becomes open). Even if the analyzer is not used on any field mapping, it still needs to be loaded on the index recovery phase.
+Analyzers need to be loaded when an index is restored (such as when a node starts, or the index becomes open).
+Even if the analyzer is not used on any field mapping, it still needs to be loaded on the index recovery phase.

-If any analyzers cannot be loaded, the index becomes unavailable and the cluster status becomes red or yellow as index shards are not available. To prevent that, synonyms sets that are used in analyzers can’t be deleted. A delete request in this case will return a 400 response code.
+If any analyzers cannot be loaded, the index becomes unavailable and the cluster status becomes red or yellow as index shards are not available.
+To prevent that, synonyms sets that are used in analyzers can't be deleted.
+A delete request in this case will return a 400 response code.

-To remove a synonyms set, you must first remove all indices that contain analyzers using it. You can migrate an index by creating a new index that does not contain the token filter with the synonyms set, and use the reindex API in order to copy over the index data. Once finished, you can delete the index. When the synonyms set is not used in analyzers, you will be able to delete it.
+To remove a synonyms set, you must first remove all indices that contain analyzers using it.
+You can migrate an index by creating a new index that does not contain the token filter with the synonyms set, and use the reindex API in order to copy over the index data.
+Once finished, you can delete the index.
+When the synonyms set is not used in analyzers, you will be able to delete it. -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-synonyms-delete-synonym) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-synonyms-delete-synonym) ```ts client.synonyms.deleteSynonym({ id }) ``` +### Arguments [_arguments_synonyms.delete_synonym] -### Arguments [_arguments_448] - -* **Request (object):** - - * **`id` (string)**: The synonyms set identifier to delete. - - - -### delete_synonym_rule [_delete_synonym_rule] +#### Request (object) [_request_synonyms.delete_synonym] +- **`id` (string)**: The synonyms set identifier to delete. -Delete a synonym rule. Delete a synonym rule from a synonym set. +## client.synonyms.deleteSynonymRule [_synonyms.delete_synonym_rule] +Delete a synonym rule. +Delete a synonym rule from a synonym set. -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-synonyms-delete-synonym-rule) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-synonyms-delete-synonym-rule) ```ts client.synonyms.deleteSynonymRule({ set_id, rule_id }) ``` +### Arguments [_arguments_synonyms.delete_synonym_rule] -### Arguments [_arguments_449] - -* **Request (object):** - - * **`set_id` (string)**: The ID of the synonym set to update. - * **`rule_id` (string)**: The ID of the synonym rule to delete. - - - -### get_synonym [_get_synonym] +#### Request (object) [_request_synonyms.delete_synonym_rule] +- **`set_id` (string)**: The ID of the synonym set to update. +- **`rule_id` (string)**: The ID of the synonym rule to delete. +## client.synonyms.getSynonym [_synonyms.get_synonym] Get a synonym set. -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-synonyms-get-synonym) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-synonyms-get-synonym) ```ts client.synonyms.getSynonym({ id }) ``` +### Arguments [_arguments_synonyms.get_synonym] -### Arguments [_arguments_450] - -* **Request (object):** - - * **`id` (string)**: The synonyms set identifier to retrieve. - * **`from` (Optional, number)**: The starting offset for query rules to retrieve. - * **`size` (Optional, number)**: The max number of query rules to retrieve. - +#### Request (object) [_request_synonyms.get_synonym] +- **`id` (string)**: The synonyms set identifier to retrieve. +- **`from` (Optional, number)**: The starting offset for query rules to retrieve. +- **`size` (Optional, number)**: The max number of query rules to retrieve. +## client.synonyms.getSynonymRule [_synonyms.get_synonym_rule] +Get a synonym rule. +Get a synonym rule from a synonym set. -### get_synonym_rule [_get_synonym_rule] - -Get a synonym rule. Get a synonym rule from a synonym set. - -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-synonyms-get-synonym-rule) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-synonyms-get-synonym-rule) ```ts client.synonyms.getSynonymRule({ set_id, rule_id }) ``` +### Arguments [_arguments_synonyms.get_synonym_rule] -### Arguments [_arguments_451] - -* **Request (object):** - - * **`set_id` (string)**: The ID of the synonym set to retrieve the synonym rule from. - * **`rule_id` (string)**: The ID of the synonym rule to retrieve. 
+#### Request (object) [_request_synonyms.get_synonym_rule] +- **`set_id` (string)**: The ID of the synonym set to retrieve the synonym rule from. +- **`rule_id` (string)**: The ID of the synonym rule to retrieve. +## client.synonyms.getSynonymsSets [_synonyms.get_synonyms_sets] +Get all synonym sets. +Get a summary of all defined synonym sets. - -### get_synonyms_sets [_get_synonyms_sets] - -Get all synonym sets. Get a summary of all defined synonym sets. - -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-synonyms-get-synonym) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-synonyms-get-synonym) ```ts client.synonyms.getSynonymsSets({ ... }) ``` +### Arguments [_arguments_synonyms.get_synonyms_sets] -### Arguments [_arguments_452] - -* **Request (object):** +#### Request (object) [_request_synonyms.get_synonyms_sets] +- **`from` (Optional, number)**: The starting offset for synonyms sets to retrieve. +- **`size` (Optional, number)**: The maximum number of synonyms sets to retrieve. - * **`from` (Optional, number)**: The starting offset for synonyms sets to retrieve. - * **`size` (Optional, number)**: The maximum number of synonyms sets to retrieve. +## client.synonyms.putSynonym [_synonyms.put_synonym] +Create or update a synonym set. +Synonyms sets are limited to a maximum of 10,000 synonym rules per set. +If you need to manage more synonym rules, you can create multiple synonym sets. +When an existing synonyms set is updated, the search analyzers that use the synonyms set are reloaded automatically for all indices. +This is equivalent to invoking the reload search analyzers API for all indices that use the synonyms set. - -### put_synonym [_put_synonym] - -Create or update a synonym set. Synonyms sets are limited to a maximum of 10,000 synonym rules per set. If you need to manage more synonym rules, you can create multiple synonym sets. - -When an existing synonyms set is updated, the search analyzers that use the synonyms set are reloaded automatically for all indices. This is equivalent to invoking the reload search analyzers API for all indices that use the synonyms set. - -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-synonyms-put-synonym) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-synonyms-put-synonym) ```ts client.synonyms.putSynonym({ id, synonyms_set }) ``` +### Arguments [_arguments_synonyms.put_synonym] -### Arguments [_arguments_453] - -* **Request (object):** - - * **`id` (string)**: The ID of the synonyms set to be created or updated. - * **`synonyms_set` ({ id, synonyms } | { id, synonyms }[])**: The synonym rules definitions for the synonyms set. - - +#### Request (object) [_request_synonyms.put_synonym] +- **`id` (string)**: The ID of the synonyms set to be created or updated. +- **`synonyms_set` ({ id, synonyms } | { id, synonyms }[])**: The synonym rules definitions for the synonyms set. -### put_synonym_rule [_put_synonym_rule] - -Create or update a synonym rule. Create or update a synonym rule in a synonym set. +## client.synonyms.putSynonymRule [_synonyms.put_synonym_rule] +Create or update a synonym rule. +Create or update a synonym rule in a synonym set. If any of the synonym rules included is invalid, the API returns an error. When you update a synonym rule, all analyzers using the synonyms set will be reloaded automatically to reflect the new rule. 
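+For example, here is a minimal sketch of creating or updating a single rule in an existing synonyms set (the set ID, rule ID, and synonym string are illustrative, and `client` is assumed to be an already-instantiated Elasticsearch `Client`):
+
+```ts
+// Upsert one rule in the hypothetical "my-synonyms-set" synonyms set.
+// The `synonyms` value uses Solr synonym syntax.
+const response = await client.synonyms.putSynonymRule({
+  set_id: 'my-synonyms-set',
+  rule_id: 'rule-1',
+  synonyms: 'hello, hi, howdy'
+})
+console.log(response)
+```
+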
-[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-synonyms-put-synonym-rule) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-synonyms-put-synonym-rule) ```ts client.synonyms.putSynonymRule({ set_id, rule_id, synonyms }) ``` +### Arguments [_arguments_synonyms.put_synonym_rule] -### Arguments [_arguments_454] - -* **Request (object):** - - * **`set_id` (string)**: The ID of the synonym set. - * **`rule_id` (string)**: The ID of the synonym rule to be updated or created. - * **`synonyms` (string)**: The synonym rule information definition, which must be in Solr format. - - - -## tasks [_tasks_2] - - -### cancel [_cancel] +#### Request (object) [_request_synonyms.put_synonym_rule] +- **`set_id` (string)**: The ID of the synonym set. +- **`rule_id` (string)**: The ID of the synonym rule to be updated or created. +- **`synonyms` (string)**: The synonym rule information definition, which must be in Solr format. +## client.tasks.cancel [_tasks.cancel] Cancel a task. -::::{warning} -The task management API is new and should still be considered a beta feature. The API may change in ways that are not backwards compatible. -:::: +WARNING: The task management API is new and should still be considered a beta feature. +The API may change in ways that are not backwards compatible. +A task may continue to run for some time after it has been cancelled because it may not be able to safely stop its current activity straight away. +It is also possible that Elasticsearch must complete its work on other tasks before it can process the cancellation. +The get task information API will continue to list these cancelled tasks until they complete. +The cancelled flag in the response indicates that the cancellation command has been processed and the task will stop as soon as possible. -A task may continue to run for some time after it has been cancelled because it may not be able to safely stop its current activity straight away. It is also possible that Elasticsearch must complete its work on other tasks before it can process the cancellation. The get task information API will continue to list these cancelled tasks until they complete. The cancelled flag in the response indicates that the cancellation command has been processed and the task will stop as soon as possible. +To troubleshoot why a cancelled task does not complete promptly, use the get task information API with the `?detailed` parameter to identify the other tasks the system is running. +You can also use the node hot threads API to obtain detailed information about the work the system is doing instead of completing the cancelled task. -To troubleshoot why a cancelled task does not complete promptly, use the get task information API with the `?detailed` parameter to identify the other tasks the system is running. You can also use the node hot threads API to obtain detailed information about the work the system is doing instead of completing the cancelled task. - -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-tasks) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/group/endpoint-tasks) ```ts client.tasks.cancel({ ... }) ``` +### Arguments [_arguments_tasks.cancel] -### Arguments [_arguments_455] - -* **Request (object):** - - * **`task_id` (Optional, string | number)**: The task identifier. 
- * **`actions` (Optional, string | string[])**: A list or wildcard expression of actions that is used to limit the request. - * **`nodes` (Optional, string[])**: A list of node IDs or names that is used to limit the request. - * **`parent_task_id` (Optional, string)**: A parent task ID that is used to limit the tasks. - * **`wait_for_completion` (Optional, boolean)**: If true, the request blocks until all found tasks are complete. - +#### Request (object) [_request_tasks.cancel] +- **`task_id` (Optional, string | number)**: The task identifier. +- **`actions` (Optional, string | string[])**: A list or wildcard expression of actions that is used to limit the request. +- **`nodes` (Optional, string[])**: A list of node IDs or names that is used to limit the request. +- **`parent_task_id` (Optional, string)**: A parent task ID that is used to limit the tasks. +- **`wait_for_completion` (Optional, boolean)**: If true, the request blocks until all found tasks are complete. +## client.tasks.get [_tasks.get] +Get task information. +Get information about a task currently running in the cluster. -### get [_get_10] - -Get task information. Get information about a task currently running in the cluster. - -::::{warning} -The task management API is new and should still be considered a beta feature. The API may change in ways that are not backwards compatible. -:::: - +WARNING: The task management API is new and should still be considered a beta feature. +The API may change in ways that are not backwards compatible. If the task identifier is not found, a 404 response code indicates that there are no resources that match the request. -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-tasks) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/group/endpoint-tasks) ```ts client.tasks.get({ task_id }) ``` +### Arguments [_arguments_tasks.get] -### Arguments [_arguments_456] +#### Request (object) [_request_tasks.get] +- **`task_id` (string)**: The task identifier. +- **`timeout` (Optional, string | -1 | 0)**: The period to wait for a response. +If no response is received before the timeout expires, the request fails and returns an error. +- **`wait_for_completion` (Optional, boolean)**: If `true`, the request blocks until the task has completed. -* **Request (object):** - - * **`task_id` (string)**: The task identifier. - * **`timeout` (Optional, string | -1 | 0)**: The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. - * **`wait_for_completion` (Optional, boolean)**: If `true`, the request blocks until the task has completed. - - - -### list [_list_3] - -Get all tasks. Get information about the tasks currently running on one or more nodes in the cluster. - -::::{warning} -The task management API is new and should still be considered a beta feature. The API may change in ways that are not backwards compatible. -:::: +## client.tasks.list [_tasks.list] +Get all tasks. +Get information about the tasks currently running on one or more nodes in the cluster. +WARNING: The task management API is new and should still be considered a beta feature. +The API may change in ways that are not backwards compatible. **Identifying running tasks** -The `X-Opaque-Id header`, when provided on the HTTP request header, is going to be returned as a header in the response as well as in the headers field for in the task information. 
This enables you to track certain calls or associate certain tasks with the client that started them. For example:
+The `X-Opaque-Id` header, when provided on the HTTP request header, is going to be returned as a header in the response as well as in the `headers` field of the task information.
+This enables you to track certain calls or associate certain tasks with the client that started them.
+For example:

```
curl -i -H "X-Opaque-Id: 123456" "http://localhost:9200/_tasks?group_by=parents"
@@ -13429,38 +14422,40 @@ content-length: 831
}
}
```
+In this example, `X-Opaque-Id: 123456` is the ID as a part of the response header.
+The `X-Opaque-Id` in the task `headers` is the ID for the task that was initiated by the REST request.
+The `X-Opaque-Id` in the children `headers` is the child task of the task that was initiated by the REST request.

-In this example, `X-Opaque-Id: 123456` is the ID as a part of the response header. The `X-Opaque-Id` in the task `headers` is the ID for the task that was initiated by the REST request. The `X-Opaque-Id` in the children `headers` is the child task of the task that was initiated by the REST request.
-
-[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-tasks)
+[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/group/endpoint-tasks)

```ts
client.tasks.list({ ... })
```
+### Arguments [_arguments_tasks.list]

-### Arguments [_arguments_457]
+#### Request (object) [_request_tasks.list]
+- **`actions` (Optional, string | string[])**: A list or wildcard expression of actions used to limit the request.
+For example, you can use `cluster:*` to retrieve all cluster-related tasks.
+- **`detailed` (Optional, boolean)**: If `true`, the response includes detailed information about the running tasks.
+This information is useful to distinguish tasks from each other but is more costly to run.
+- **`group_by` (Optional, Enum("nodes" | "parents" | "none"))**: A key that is used to group tasks in the response.
+The task lists can be grouped either by nodes or by parent tasks.
+- **`nodes` (Optional, string | string[])**: A list of node IDs or names that is used to limit the returned information.
+- **`parent_task_id` (Optional, string)**: A parent task identifier that is used to limit returned information.
+To return all tasks, omit this parameter or use a value of `-1`.
+If the parent task is not found, the API does not return a 404 response code.
+- **`timeout` (Optional, string | -1 | 0)**: The period to wait for each node to respond.
+If a node does not respond before its timeout expires, the response does not include its information.
+However, timed out nodes are included in the `node_failures` property.
+- **`wait_for_completion` (Optional, boolean)**: If `true`, the request blocks until the operation is complete.

-* **Request (object):**
+## client.textStructure.findFieldStructure [_text_structure.find_field_structure]
+Find the structure of a text field.
+Find the structure of a text field in an Elasticsearch index.

- * **`actions` (Optional, string | string[])**: A list or wildcard expression of actions used to limit the request. For example, you can use `cluser:*` to retrieve all cluster-related tasks.
- * **`detailed` (Optional, boolean)**: If `true`, the response includes detailed information about the running tasks. This information is useful to distinguish tasks from each other but is more costly to run.
- * **`group_by` (Optional, Enum("nodes" | "parents" | "none"))**: A key that is used to group tasks in the response. The task lists can be grouped either by nodes or by parent tasks. - * **`nodes` (Optional, string | string[])**: A list of node IDs or names that is used to limit the returned information. - * **`parent_task_id` (Optional, string)**: A parent task identifier that is used to limit returned information. To return all tasks, omit this parameter or use a value of `-1`. If the parent task is not found, the API does not return a 404 response code. - * **`timeout` (Optional, string | -1 | 0)**: The period to wait for each node to respond. If a node does not respond before its timeout expires, the response does not include its information. However, timed out nodes are included in the `node_failures` property. - * **`wait_for_completion` (Optional, boolean)**: If `true`, the request blocks until the operation is complete. - - - -## text_structure [_text_structure] - - -### find_field_structure [_find_field_structure] - -Find the structure of a text field. Find the structure of a text field in an Elasticsearch index. - -This API provides a starting point for extracting further information from log messages already ingested into Elasticsearch. For example, if you have ingested data into a very simple index that has just `@timestamp` and message fields, you can use this API to see what common structure exists in the message field. +This API provides a starting point for extracting further information from log messages already ingested into Elasticsearch. +For example, if you have ingested data into a very simple index that has just `@timestamp` and message fields, you can use this API to see what common structure exists in the message field. The response from the API contains: @@ -13469,41 +14464,68 @@ The response from the API contains: * Information about the structure of the text, which is useful when you write ingest configurations to index it or similarly formatted text. * Appropriate mappings for an Elasticsearch index, which you could use to ingest the text. -All this information can be calculated by the structure finder with no guidance. However, you can optionally override some of the decisions about the text structure by specifying one or more query parameters. +All this information can be calculated by the structure finder with no guidance. +However, you can optionally override some of the decisions about the text structure by specifying one or more query parameters. -If the structure finder produces unexpected results, specify the `explain` query parameter and an explanation will appear in the response. It helps determine why the returned structure was chosen. +If the structure finder produces unexpected results, specify the `explain` query parameter and an explanation will appear in the response. +It helps determine why the returned structure was chosen. -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-text_structure) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/group/endpoint-text_structure) ```ts client.textStructure.findFieldStructure({ field, index }) ``` - -### Arguments [_arguments_458] - -* **Request (object):** - - * **`field` (string)**: The field that should be analyzed. - * **`index` (string)**: The name of the index that contains the analyzed field. - * **`column_names` (Optional, string)**: If `format` is set to `delimited`, you can specify the column names in a list. 
If this parameter is not specified, the structure finder uses the column names from the header row of the text. If the text does not have a header row, columns are named "column1", "column2", "column3", for example. - * **`delimiter` (Optional, string)**: If you have set `format` to `delimited`, you can specify the character used to delimit the values in each row. Only a single character is supported; the delimiter cannot have multiple characters. By default, the API considers the following possibilities: comma, tab, semi-colon, and pipe (`|`). In this default scenario, all rows must have the same number of fields for the delimited format to be detected. If you specify a delimiter, up to 10% of the rows can have a different number of columns than the first row. - * **`documents_to_sample` (Optional, number)**: The number of documents to include in the structural analysis. The minimum value is 2. - * **`ecs_compatibility` (Optional, Enum("disabled" | "v1"))**: The mode of compatibility with ECS compliant Grok patterns. Use this parameter to specify whether to use ECS Grok patterns instead of legacy ones when the structure finder creates a Grok pattern. This setting primarily has an impact when a whole message Grok pattern such as `%{{CATALINALOG}}` matches the input. If the structure finder identifies a common structure but has no idea of the meaning then generic field names such as `path`, `ipaddress`, `field1`, and `field2` are used in the `grok_pattern` output. The intention in that situation is that a user who knows the meanings will rename the fields before using them. - * **`explain` (Optional, boolean)**: If `true`, the response includes a field named `explanation`, which is an array of strings that indicate how the structure finder produced its result. - * **`format` (Optional, Enum("delimited" | "ndjson" | "semi_structured_text" | "xml"))**: The high level structure of the text. By default, the API chooses the format. In this default scenario, all rows must have the same number of fields for a delimited format to be detected. If the format is set to delimited and the delimiter is not set, however, the API tolerates up to 5% of rows that have a different number of columns than the first row. - * **`grok_pattern` (Optional, string)**: If the format is `semi_structured_text`, you can specify a Grok pattern that is used to extract fields from every message in the text. The name of the timestamp field in the Grok pattern must match what is specified in the `timestamp_field` parameter. If that parameter is not specified, the name of the timestamp field in the Grok pattern must match "timestamp". If `grok_pattern` is not specified, the structure finder creates a Grok pattern. - * **`quote` (Optional, string)**: If the format is `delimited`, you can specify the character used to quote the values in each row if they contain newlines or the delimiter character. Only a single character is supported. If this parameter is not specified, the default value is a double quote (`"`). If your delimited text format does not use quoting, a workaround is to set this argument to a character that does not appear anywhere in the sample. - * **`should_trim_fields` (Optional, boolean)**: If the format is `delimited`, you can specify whether values between delimiters should have whitespace trimmed from them. If this parameter is not specified and the delimiter is pipe (`|`), the default value is true. Otherwise, the default value is `false`. 
- * **`timeout` (Optional, string | -1 | 0)**: The maximum amount of time that the structure analysis can take. If the analysis is still running when the timeout expires, it will be stopped. - * **`timestamp_field` (Optional, string)**: The name of the field that contains the primary timestamp of each record in the text. In particular, if the text was ingested into an index, this is the field that would be used to populate the `@timestamp` field. - - -If the format is `semi_structured_text`, this field must match the name of the appropriate extraction in the `grok_pattern`. Therefore, for semi-structured text, it is best not to specify this parameter unless `grok_pattern` is also specified. +### Arguments [_arguments_text_structure.find_field_structure] + +#### Request (object) [_request_text_structure.find_field_structure] +- **`field` (string)**: The field that should be analyzed. +- **`index` (string)**: The name of the index that contains the analyzed field. +- **`column_names` (Optional, string)**: If `format` is set to `delimited`, you can specify the column names in a list. +If this parameter is not specified, the structure finder uses the column names from the header row of the text. +If the text does not have a header row, columns are named "column1", "column2", "column3", for example. +- **`delimiter` (Optional, string)**: If you have set `format` to `delimited`, you can specify the character used to delimit the values in each row. +Only a single character is supported; the delimiter cannot have multiple characters. +By default, the API considers the following possibilities: comma, tab, semi-colon, and pipe (`|`). +In this default scenario, all rows must have the same number of fields for the delimited format to be detected. +If you specify a delimiter, up to 10% of the rows can have a different number of columns than the first row. +- **`documents_to_sample` (Optional, number)**: The number of documents to include in the structural analysis. +The minimum value is 2. +- **`ecs_compatibility` (Optional, Enum("disabled" | "v1"))**: The mode of compatibility with ECS compliant Grok patterns. +Use this parameter to specify whether to use ECS Grok patterns instead of legacy ones when the structure finder creates a Grok pattern. +This setting primarily has an impact when a whole message Grok pattern such as `%{CATALINALOG}` matches the input. +If the structure finder identifies a common structure but has no idea of the meaning then generic field names such as `path`, `ipaddress`, `field1`, and `field2` are used in the `grok_pattern` output. +The intention in that situation is that a user who knows the meanings will rename the fields before using them. +- **`explain` (Optional, boolean)**: If `true`, the response includes a field named `explanation`, which is an array of strings that indicate how the structure finder produced its result. +- **`format` (Optional, Enum("delimited" | "ndjson" | "semi_structured_text" | "xml"))**: The high level structure of the text. +By default, the API chooses the format. +In this default scenario, all rows must have the same number of fields for a delimited format to be detected. +If the format is set to delimited and the delimiter is not set, however, the API tolerates up to 5% of rows that have a different number of columns than the first row. +- **`grok_pattern` (Optional, string)**: If the format is `semi_structured_text`, you can specify a Grok pattern that is used to extract fields from every message in the text. 
+The name of the timestamp field in the Grok pattern must match what is specified in the `timestamp_field` parameter. +If that parameter is not specified, the name of the timestamp field in the Grok pattern must match "timestamp". +If `grok_pattern` is not specified, the structure finder creates a Grok pattern. +- **`quote` (Optional, string)**: If the format is `delimited`, you can specify the character used to quote the values in each row if they contain newlines or the delimiter character. +Only a single character is supported. +If this parameter is not specified, the default value is a double quote (`"`). +If your delimited text format does not use quoting, a workaround is to set this argument to a character that does not appear anywhere in the sample. +- **`should_trim_fields` (Optional, boolean)**: If the format is `delimited`, you can specify whether values between delimiters should have whitespace trimmed from them. +If this parameter is not specified and the delimiter is pipe (`|`), the default value is true. +Otherwise, the default value is `false`. +- **`timeout` (Optional, string | -1 | 0)**: The maximum amount of time that the structure analysis can take. +If the analysis is still running when the timeout expires, it will be stopped. +- **`timestamp_field` (Optional, string)**: The name of the field that contains the primary timestamp of each record in the text. +In particular, if the text was ingested into an index, this is the field that would be used to populate the `@timestamp` field. + +If the format is `semi_structured_text`, this field must match the name of the appropriate extraction in the `grok_pattern`. +Therefore, for semi-structured text, it is best not to specify this parameter unless `grok_pattern` is also specified. For structured text, if you specify this parameter, the field must exist within the text. -If this parameter is not specified, the structure finder makes a decision about which field (if any) is the primary timestamp field. For structured text, it is not compulsory to have a timestamp in the text. *** *`timestamp_format` (Optional, string)**: The Java time format of the timestamp field in the text. Only a subset of Java time format letter groups are supported: +If this parameter is not specified, the structure finder makes a decision about which field (if any) is the primary timestamp field. +For structured text, it is not compulsory to have a timestamp in the text. +- **`timestamp_format` (Optional, string)**: The Java time format of the timestamp field in the text. +Only a subset of Java time format letter groups are supported: * `a` * `d` @@ -13525,60 +14547,91 @@ If this parameter is not specified, the structure finder makes a decision about * `yyyy` * `zzz` -Additionally `S` letter groups (fractional seconds) of length one to nine are supported providing they occur after `ss` and are separated from the `ss` by a period (`.`), comma (`,`), or colon (`:`). Spacing and punctuation is also permitted with the exception a question mark (`?`), newline, and carriage return, together with literal text enclosed in single quotes. For example, `MM/dd HH.mm.ss,SSSSSS 'in' yyyy` is a valid override format. +Additionally `S` letter groups (fractional seconds) of length one to nine are supported providing they occur after `ss` and are separated from the `ss` by a period (`.`), comma (`,`), or colon (`:`). 
+Spacing and punctuation is also permitted with the exception of a question mark (`?`), newline, and carriage return, together with literal text enclosed in single quotes.
+For example, `MM/dd HH.mm.ss,SSSSSS 'in' yyyy` is a valid override format.
-One valuable use case for this parameter is when the format is semi-structured text, there are multiple timestamp formats in the text, and you know which format corresponds to the primary timestamp, but you do not want to specify the full `grok_pattern`. Another is when the timestamp format is one that the structure finder does not consider by default.
+One valuable use case for this parameter is when the format is semi-structured text, there are multiple timestamp formats in the text, and you know which format corresponds to the primary timestamp, but you do not want to specify the full `grok_pattern`.
+Another is when the timestamp format is one that the structure finder does not consider by default.
If this parameter is not specified, the structure finder chooses the best format from a built-in set.
-If the special value `null` is specified, the structure finder will not look for a primary timestamp in the text. When the format is semi-structured text, this will result in the structure finder treating the text as single-line messages.
-
+If the special value `null` is specified, the structure finder will not look for a primary timestamp in the text.
+When the format is semi-structured text, this will result in the structure finder treating the text as single-line messages.
-### find_message_structure [_find_message_structure]
+## client.textStructure.findMessageStructure [_text_structure.find_message_structure]
+Find the structure of text messages.
+Find the structure of a list of text messages.
+The messages must contain data that is suitable to be ingested into Elasticsearch.
-Find the structure of text messages. Find the structure of a list of text messages. The messages must contain data that is suitable to be ingested into Elasticsearch.
-
-This API provides a starting point for ingesting data into Elasticsearch in a format that is suitable for subsequent use with other Elastic Stack functionality. Use this API rather than the find text structure API if your input text has already been split up into separate messages by some other process.
+This API provides a starting point for ingesting data into Elasticsearch in a format that is suitable for subsequent use with other Elastic Stack functionality.
+Use this API rather than the find text structure API if your input text has already been split up into separate messages by some other process.
The response from the API contains:
* Sample messages.
* Statistics that reveal the most common values for all fields detected within the text and basic numeric statistics for numeric fields.
-* Information about the structure of the text, which is useful when you write ingest configurations to index it or similarly formatted text. Appropriate mappings for an Elasticsearch index, which you could use to ingest the text.
+* Information about the structure of the text, which is useful when you write ingest configurations to index it or similarly formatted text.
+* Appropriate mappings for an Elasticsearch index, which you could use to ingest the text.
-All this information can be calculated by the structure finder with no guidance. However, you can optionally override some of the decisions about the text structure by specifying one or more query parameters.
+All this information can be calculated by the structure finder with no guidance. +However, you can optionally override some of the decisions about the text structure by specifying one or more query parameters. -If the structure finder produces unexpected results, specify the `explain` query parameter and an explanation will appear in the response. It helps determine why the returned structure was chosen. +If the structure finder produces unexpected results, specify the `explain` query parameter and an explanation will appear in the response. +It helps determine why the returned structure was chosen. -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-text-structure-find-message-structure) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-text-structure-find-message-structure) ```ts client.textStructure.findMessageStructure({ messages }) ``` - -### Arguments [_arguments_459] - -* **Request (object):** - - * **`messages` (string[])**: The list of messages you want to analyze. - * **`column_names` (Optional, string)**: If the format is `delimited`, you can specify the column names in a list. If this parameter is not specified, the structure finder uses the column names from the header row of the text. If the text does not have a header role, columns are named "column1", "column2", "column3", for example. - * **`delimiter` (Optional, string)**: If you the format is `delimited`, you can specify the character used to delimit the values in each row. Only a single character is supported; the delimiter cannot have multiple characters. By default, the API considers the following possibilities: comma, tab, semi-colon, and pipe (`|`). In this default scenario, all rows must have the same number of fields for the delimited format to be detected. If you specify a delimiter, up to 10% of the rows can have a different number of columns than the first row. - * **`ecs_compatibility` (Optional, Enum("disabled" | "v1"))**: The mode of compatibility with ECS compliant Grok patterns. Use this parameter to specify whether to use ECS Grok patterns instead of legacy ones when the structure finder creates a Grok pattern. This setting primarily has an impact when a whole message Grok pattern such as `%{{CATALINALOG}}` matches the input. If the structure finder identifies a common structure but has no idea of meaning then generic field names such as `path`, `ipaddress`, `field1`, and `field2` are used in the `grok_pattern` output, with the intention that a user who knows the meanings rename these fields before using it. - * **`explain` (Optional, boolean)**: If this parameter is set to true, the response includes a field named `explanation`, which is an array of strings that indicate how the structure finder produced its result. - * **`format` (Optional, Enum("delimited" | "ndjson" | "semi_structured_text" | "xml"))**: The high level structure of the text. By default, the API chooses the format. In this default scenario, all rows must have the same number of fields for a delimited format to be detected. If the format is `delimited` and the delimiter is not set, however, the API tolerates up to 5% of rows that have a different number of columns than the first row. - * **`grok_pattern` (Optional, string)**: If the format is `semi_structured_text`, you can specify a Grok pattern that is used to extract fields from every message in the text. 
The name of the timestamp field in the Grok pattern must match what is specified in the `timestamp_field` parameter. If that parameter is not specified, the name of the timestamp field in the Grok pattern must match "timestamp". If `grok_pattern` is not specified, the structure finder creates a Grok pattern.
- * **`quote` (Optional, string)**: If the format is `delimited`, you can specify the character used to quote the values in each row if they contain newlines or the delimiter character. Only a single character is supported. If this parameter is not specified, the default value is a double quote (`"`). If your delimited text format does not use quoting, a workaround is to set this argument to a character that does not appear anywhere in the sample.
- * **`should_trim_fields` (Optional, boolean)**: If the format is `delimited`, you can specify whether values between delimiters should have whitespace trimmed from them. If this parameter is not specified and the delimiter is pipe (`|`), the default value is true. Otherwise, the default value is `false`.
- * **`timeout` (Optional, string | -1 | 0)**: The maximum amount of time that the structure analysis can take. If the analysis is still running when the timeout expires, it will be stopped.
- * **`timestamp_field` (Optional, string)**: The name of the field that contains the primary timestamp of each record in the text. In particular, if the text was ingested into an index, this is the field that would be used to populate the `@timestamp` field.
-
-
If the format is `semi_structured_text`, this field must match the name of the appropriate extraction in the `grok_pattern`. Therefore, for semi-structured text, it is best not to specify this parameter unless `grok_pattern` is also specified.
+### Arguments [_arguments_text_structure.find_message_structure]
+
+#### Request (object) [_request_text_structure.find_message_structure]
+- **`messages` (string[])**: The list of messages you want to analyze.
+- **`column_names` (Optional, string)**: If the format is `delimited`, you can specify the column names in a list.
+If this parameter is not specified, the structure finder uses the column names from the header row of the text.
+If the text does not have a header row, columns are named "column1", "column2", "column3", for example.
+- **`delimiter` (Optional, string)**: If the format is `delimited`, you can specify the character used to delimit the values in each row.
+Only a single character is supported; the delimiter cannot have multiple characters.
+By default, the API considers the following possibilities: comma, tab, semi-colon, and pipe (`|`).
+In this default scenario, all rows must have the same number of fields for the delimited format to be detected.
+If you specify a delimiter, up to 10% of the rows can have a different number of columns than the first row.
+- **`ecs_compatibility` (Optional, Enum("disabled" | "v1"))**: The mode of compatibility with ECS compliant Grok patterns.
+Use this parameter to specify whether to use ECS Grok patterns instead of legacy ones when the structure finder creates a Grok pattern.
+This setting primarily has an impact when a whole message Grok pattern such as `%{CATALINALOG}` matches the input.
+If the structure finder identifies a common structure but has no idea of meaning then generic field names such as `path`, `ipaddress`, `field1`, and `field2` are used in the `grok_pattern` output, with the intention that a user who knows the meanings rename these fields before using it.
+- **`explain` (Optional, boolean)**: If this parameter is set to true, the response includes a field named `explanation`, which is an array of strings that indicate how the structure finder produced its result. +- **`format` (Optional, Enum("delimited" | "ndjson" | "semi_structured_text" | "xml"))**: The high level structure of the text. +By default, the API chooses the format. +In this default scenario, all rows must have the same number of fields for a delimited format to be detected. +If the format is `delimited` and the delimiter is not set, however, the API tolerates up to 5% of rows that have a different number of columns than the first row. +- **`grok_pattern` (Optional, string)**: If the format is `semi_structured_text`, you can specify a Grok pattern that is used to extract fields from every message in the text. +The name of the timestamp field in the Grok pattern must match what is specified in the `timestamp_field` parameter. +If that parameter is not specified, the name of the timestamp field in the Grok pattern must match "timestamp". +If `grok_pattern` is not specified, the structure finder creates a Grok pattern. +- **`quote` (Optional, string)**: If the format is `delimited`, you can specify the character used to quote the values in each row if they contain newlines or the delimiter character. +Only a single character is supported. +If this parameter is not specified, the default value is a double quote (`"`). +If your delimited text format does not use quoting, a workaround is to set this argument to a character that does not appear anywhere in the sample. +- **`should_trim_fields` (Optional, boolean)**: If the format is `delimited`, you can specify whether values between delimiters should have whitespace trimmed from them. +If this parameter is not specified and the delimiter is pipe (`|`), the default value is true. +Otherwise, the default value is `false`. +- **`timeout` (Optional, string | -1 | 0)**: The maximum amount of time that the structure analysis can take. +If the analysis is still running when the timeout expires, it will be stopped. +- **`timestamp_field` (Optional, string)**: The name of the field that contains the primary timestamp of each record in the text. +In particular, if the text was ingested into an index, this is the field that would be used to populate the `@timestamp` field. + +If the format is `semi_structured_text`, this field must match the name of the appropriate extraction in the `grok_pattern`. +Therefore, for semi-structured text, it is best not to specify this parameter unless `grok_pattern` is also specified. For structured text, if you specify this parameter, the field must exist within the text. -If this parameter is not specified, the structure finder makes a decision about which field (if any) is the primary timestamp field. For structured text, it is not compulsory to have a timestamp in the text. *** *`timestamp_format` (Optional, string)**: The Java time format of the timestamp field in the text. Only a subset of Java time format letter groups are supported: +If this parameter is not specified, the structure finder makes a decision about which field (if any) is the primary timestamp field. +For structured text, it is not compulsory to have a timestamp in the text. +- **`timestamp_format` (Optional, string)**: The Java time format of the timestamp field in the text. 
+Only a subset of Java time format letter groups are supported:
* `a`
* `d`
@@ -13600,20 +14653,26 @@ If this parameter is not specified, the structure finder makes a decision about
* `yyyy`
* `zzz`
-Additionally `S` letter groups (fractional seconds) of length one to nine are supported providing they occur after `ss` and are separated from the `ss` by a period (`.`), comma (`,`), or colon (`:`). Spacing and punctuation is also permitted with the exception a question mark (`?`), newline, and carriage return, together with literal text enclosed in single quotes. For example, `MM/dd HH.mm.ss,SSSSSS 'in' yyyy` is a valid override format.
+Additionally `S` letter groups (fractional seconds) of length one to nine are supported providing they occur after `ss` and are separated from the `ss` by a period (`.`), comma (`,`), or colon (`:`).
+Spacing and punctuation is also permitted with the exception of a question mark (`?`), newline, and carriage return, together with literal text enclosed in single quotes.
+For example, `MM/dd HH.mm.ss,SSSSSS 'in' yyyy` is a valid override format.
-One valuable use case for this parameter is when the format is semi-structured text, there are multiple timestamp formats in the text, and you know which format corresponds to the primary timestamp, but you do not want to specify the full `grok_pattern`. Another is when the timestamp format is one that the structure finder does not consider by default.
+One valuable use case for this parameter is when the format is semi-structured text, there are multiple timestamp formats in the text, and you know which format corresponds to the primary timestamp, but you do not want to specify the full `grok_pattern`.
+Another is when the timestamp format is one that the structure finder does not consider by default.
If this parameter is not specified, the structure finder chooses the best format from a built-in set.
-If the special value `null` is specified, the structure finder will not look for a primary timestamp in the text. When the format is semi-structured text, this will result in the structure finder treating the text as single-line messages.
-
-
-### find_structure [_find_structure]
+If the special value `null` is specified, the structure finder will not look for a primary timestamp in the text.
+When the format is semi-structured text, this will result in the structure finder treating the text as single-line messages.
-Find the structure of a text file. The text file must contain data that is suitable to be ingested into Elasticsearch.
+## client.textStructure.findStructure [_text_structure.find_structure]
+Find the structure of a text file.
+The text file must contain data that is suitable to be ingested into Elasticsearch.
-This API provides a starting point for ingesting data into Elasticsearch in a format that is suitable for subsequent use with other Elastic Stack functionality. Unlike other Elasticsearch endpoints, the data that is posted to this endpoint does not need to be UTF-8 encoded and in JSON format. It must, however, be text; binary text formats are not currently supported. The size is limited to the Elasticsearch HTTP receive buffer size, which defaults to 100 Mb.
+This API provides a starting point for ingesting data into Elasticsearch in a format that is suitable for subsequent use with other Elastic Stack functionality.
+Unlike other Elasticsearch endpoints, the data that is posted to this endpoint does not need to be UTF-8 encoded and in JSON format.
+It must, however, be text; binary text formats are not currently supported. +The size is limited to the Elasticsearch HTTP receive buffer size, which defaults to 100 Mb. The response from the API contains: @@ -13622,42 +14681,78 @@ The response from the API contains: * Information about the structure of the text, which is useful when you write ingest configurations to index it or similarly formatted text. * Appropriate mappings for an Elasticsearch index, which you could use to ingest the text. -All this information can be calculated by the structure finder with no guidance. However, you can optionally override some of the decisions about the text structure by specifying one or more query parameters. +All this information can be calculated by the structure finder with no guidance. +However, you can optionally override some of the decisions about the text structure by specifying one or more query parameters. -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-text-structure-find-structure) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-text-structure-find-structure) ```ts client.textStructure.findStructure({ ... }) ``` - -### Arguments [_arguments_460] - -* **Request (object):** - - * **`text_files` (Optional, TJsonDocument[])** - * **`charset` (Optional, string)**: The text’s character set. It must be a character set that is supported by the JVM that Elasticsearch uses. For example, `UTF-8`, `UTF-16LE`, `windows-1252`, or `EUC-JP`. If this parameter is not specified, the structure finder chooses an appropriate character set. - * **`column_names` (Optional, string)**: If you have set format to `delimited`, you can specify the column names in a list. If this parameter is not specified, the structure finder uses the column names from the header row of the text. If the text does not have a header role, columns are named "column1", "column2", "column3", for example. - * **`delimiter` (Optional, string)**: If you have set `format` to `delimited`, you can specify the character used to delimit the values in each row. Only a single character is supported; the delimiter cannot have multiple characters. By default, the API considers the following possibilities: comma, tab, semi-colon, and pipe (`|`). In this default scenario, all rows must have the same number of fields for the delimited format to be detected. If you specify a delimiter, up to 10% of the rows can have a different number of columns than the first row. - * **`ecs_compatibility` (Optional, string)**: The mode of compatibility with ECS compliant Grok patterns. Use this parameter to specify whether to use ECS Grok patterns instead of legacy ones when the structure finder creates a Grok pattern. Valid values are `disabled` and `v1`. This setting primarily has an impact when a whole message Grok pattern such as `%{{CATALINALOG}}` matches the input. If the structure finder identifies a common structure but has no idea of meaning then generic field names such as `path`, `ipaddress`, `field1`, and `field2` are used in the `grok_pattern` output, with the intention that a user who knows the meanings rename these fields before using it. - * **`explain` (Optional, boolean)**: If this parameter is set to `true`, the response includes a field named explanation, which is an array of strings that indicate how the structure finder produced its result. 
If the structure finder produces unexpected results for some text, use this query parameter to help you determine why the returned structure was chosen. - * **`format` (Optional, string)**: The high level structure of the text. Valid values are `ndjson`, `xml`, `delimited`, and `semi_structured_text`. By default, the API chooses the format. In this default scenario, all rows must have the same number of fields for a delimited format to be detected. If the format is set to `delimited` and the delimiter is not set, however, the API tolerates up to 5% of rows that have a different number of columns than the first row. - * **`grok_pattern` (Optional, string)**: If you have set `format` to `semi_structured_text`, you can specify a Grok pattern that is used to extract fields from every message in the text. The name of the timestamp field in the Grok pattern must match what is specified in the `timestamp_field` parameter. If that parameter is not specified, the name of the timestamp field in the Grok pattern must match "timestamp". If `grok_pattern` is not specified, the structure finder creates a Grok pattern. - * **`has_header_row` (Optional, boolean)**: If you have set `format` to `delimited`, you can use this parameter to indicate whether the column names are in the first row of the text. If this parameter is not specified, the structure finder guesses based on the similarity of the first row of the text to other rows. - * **`line_merge_size_limit` (Optional, number)**: The maximum number of characters in a message when lines are merged to form messages while analyzing semi-structured text. If you have extremely long messages you may need to increase this, but be aware that this may lead to very long processing times if the way to group lines into messages is misdetected. - * **`lines_to_sample` (Optional, number)**: The number of lines to include in the structural analysis, starting from the beginning of the text. The minimum is 2. If the value of this parameter is greater than the number of lines in the text, the analysis proceeds (as long as there are at least two lines in the text) for all of the lines. - - -::::{note} -The number of lines and the variation of the lines affects the speed of the analysis. For example, if you upload text where the first 1000 lines are all variations on the same message, the analysis will find more commonality than would be seen with a bigger sample. If possible, however, it is more efficient to upload sample text with more variety in the first 1000 lines than to request analysis of 100000 lines to achieve some variety. ** *`quote` (Optional, string)**: If you have set `format` to `delimited`, you can specify the character used to quote the values in each row if they contain newlines or the delimiter character. Only a single character is supported. If this parameter is not specified, the default value is a double quote (`"`). If your delimited text format does not use quoting, a workaround is to set this argument to a character that does not appear anywhere in the sample. *** *`should_trim_fields` (Optional, boolean)**: If you have set `format` to `delimited`, you can specify whether values between delimiters should have whitespace trimmed from them. If this parameter is not specified and the delimiter is pipe (`|`), the default value is `true`. Otherwise, the default value is `false`. *** *`timeout` (Optional, string | -1 | 0)**: The maximum amount of time that the structure analysis can take. 
If the analysis is still running when the timeout expires then it will be stopped. ** *`timestamp_field` (Optional, string)**: The name of the field that contains the primary timestamp of each record in the text. In particular, if the text were ingested into an index, this is the field that would be used to populate the `@timestamp` field.
-::::
-
-
If the `format` is `semi_structured_text`, this field must match the name of the appropriate extraction in the `grok_pattern`. Therefore, for semi-structured text, it is best not to specify this parameter unless `grok_pattern` is also specified.
+### Arguments [_arguments_text_structure.find_structure]
+
+#### Request (object) [_request_text_structure.find_structure]
+- **`text_files` (Optional, TJsonDocument[])**
+- **`charset` (Optional, string)**: The text's character set.
+It must be a character set that is supported by the JVM that Elasticsearch uses.
+For example, `UTF-8`, `UTF-16LE`, `windows-1252`, or `EUC-JP`.
+If this parameter is not specified, the structure finder chooses an appropriate character set.
+- **`column_names` (Optional, string)**: If you have set format to `delimited`, you can specify the column names in a list.
+If this parameter is not specified, the structure finder uses the column names from the header row of the text.
+If the text does not have a header row, columns are named "column1", "column2", "column3", for example.
+- **`delimiter` (Optional, string)**: If you have set `format` to `delimited`, you can specify the character used to delimit the values in each row.
+Only a single character is supported; the delimiter cannot have multiple characters.
+By default, the API considers the following possibilities: comma, tab, semi-colon, and pipe (`|`).
+In this default scenario, all rows must have the same number of fields for the delimited format to be detected.
+If you specify a delimiter, up to 10% of the rows can have a different number of columns than the first row.
+- **`ecs_compatibility` (Optional, string)**: The mode of compatibility with ECS compliant Grok patterns.
+Use this parameter to specify whether to use ECS Grok patterns instead of legacy ones when the structure finder creates a Grok pattern.
+Valid values are `disabled` and `v1`.
+This setting primarily has an impact when a whole message Grok pattern such as `%{CATALINALOG}` matches the input.
+If the structure finder identifies a common structure but has no idea of meaning then generic field names such as `path`, `ipaddress`, `field1`, and `field2` are used in the `grok_pattern` output, with the intention that a user who knows the meanings rename these fields before using it.
+- **`explain` (Optional, boolean)**: If this parameter is set to `true`, the response includes a field named explanation, which is an array of strings that indicate how the structure finder produced its result.
+If the structure finder produces unexpected results for some text, use this query parameter to help you determine why the returned structure was chosen.
+- **`format` (Optional, string)**: The high level structure of the text.
+Valid values are `ndjson`, `xml`, `delimited`, and `semi_structured_text`.
+By default, the API chooses the format.
+In this default scenario, all rows must have the same number of fields for a delimited format to be detected.
+If the format is set to `delimited` and the delimiter is not set, however, the API tolerates up to 5% of rows that have a different number of columns than the first row.
+- **`grok_pattern` (Optional, string)**: If you have set `format` to `semi_structured_text`, you can specify a Grok pattern that is used to extract fields from every message in the text. +The name of the timestamp field in the Grok pattern must match what is specified in the `timestamp_field` parameter. +If that parameter is not specified, the name of the timestamp field in the Grok pattern must match "timestamp". +If `grok_pattern` is not specified, the structure finder creates a Grok pattern. +- **`has_header_row` (Optional, boolean)**: If you have set `format` to `delimited`, you can use this parameter to indicate whether the column names are in the first row of the text. +If this parameter is not specified, the structure finder guesses based on the similarity of the first row of the text to other rows. +- **`line_merge_size_limit` (Optional, number)**: The maximum number of characters in a message when lines are merged to form messages while analyzing semi-structured text. +If you have extremely long messages you may need to increase this, but be aware that this may lead to very long processing times if the way to group lines into messages is misdetected. +- **`lines_to_sample` (Optional, number)**: The number of lines to include in the structural analysis, starting from the beginning of the text. +The minimum is 2. +If the value of this parameter is greater than the number of lines in the text, the analysis proceeds (as long as there are at least two lines in the text) for all of the lines. + +NOTE: The number of lines and the variation of the lines affects the speed of the analysis. +For example, if you upload text where the first 1000 lines are all variations on the same message, the analysis will find more commonality than would be seen with a bigger sample. +If possible, however, it is more efficient to upload sample text with more variety in the first 1000 lines than to request analysis of 100000 lines to achieve some variety. +- **`quote` (Optional, string)**: If you have set `format` to `delimited`, you can specify the character used to quote the values in each row if they contain newlines or the delimiter character. +Only a single character is supported. +If this parameter is not specified, the default value is a double quote (`"`). +If your delimited text format does not use quoting, a workaround is to set this argument to a character that does not appear anywhere in the sample. +- **`should_trim_fields` (Optional, boolean)**: If you have set `format` to `delimited`, you can specify whether values between delimiters should have whitespace trimmed from them. +If this parameter is not specified and the delimiter is pipe (`|`), the default value is `true`. +Otherwise, the default value is `false`. +- **`timeout` (Optional, string | -1 | 0)**: The maximum amount of time that the structure analysis can take. +If the analysis is still running when the timeout expires then it will be stopped. +- **`timestamp_field` (Optional, string)**: The name of the field that contains the primary timestamp of each record in the text. +In particular, if the text were ingested into an index, this is the field that would be used to populate the `@timestamp` field. + +If the `format` is `semi_structured_text`, this field must match the name of the appropriate extraction in the `grok_pattern`. +Therefore, for semi-structured text, it is best not to specify this parameter unless `grok_pattern` is also specified. For structured text, if you specify this parameter, the field must exist within the text. 
-If this parameter is not specified, the structure finder makes a decision about which field (if any) is the primary timestamp field. For structured text, it is not compulsory to have a timestamp in the text. *** *`timestamp_format` (Optional, string)**: The Java time format of the timestamp field in the text. +If this parameter is not specified, the structure finder makes a decision about which field (if any) is the primary timestamp field. +For structured text, it is not compulsory to have a timestamp in the text. +- **`timestamp_format` (Optional, string)**: The Java time format of the timestamp field in the text. Only a subset of Java time format letter groups are supported: @@ -13681,697 +14776,716 @@ Only a subset of Java time format letter groups are supported: * `yyyy` * `zzz` -Additionally `S` letter groups (fractional seconds) of length one to nine are supported providing they occur after `ss` and separated from the `ss` by a `.`, `,` or `:`. Spacing and punctuation is also permitted with the exception of `?`, newline and carriage return, together with literal text enclosed in single quotes. For example, `MM/dd HH.mm.ss,SSSSSS 'in' yyyy` is a valid override format. +Additionally `S` letter groups (fractional seconds) of length one to nine are supported providing they occur after `ss` and separated from the `ss` by a `.`, `,` or `:`. +Spacing and punctuation is also permitted with the exception of `?`, newline and carriage return, together with literal text enclosed in single quotes. +For example, `MM/dd HH.mm.ss,SSSSSS 'in' yyyy` is a valid override format. -One valuable use case for this parameter is when the format is semi-structured text, there are multiple timestamp formats in the text, and you know which format corresponds to the primary timestamp, but you do not want to specify the full `grok_pattern`. Another is when the timestamp format is one that the structure finder does not consider by default. +One valuable use case for this parameter is when the format is semi-structured text, there are multiple timestamp formats in the text, and you know which format corresponds to the primary timestamp, but you do not want to specify the full `grok_pattern`. +Another is when the timestamp format is one that the structure finder does not consider by default. If this parameter is not specified, the structure finder chooses the best format from a built-in set. -If the special value `null` is specified the structure finder will not look for a primary timestamp in the text. When the format is semi-structured text this will result in the structure finder treating the text as single-line messages. - +If the special value `null` is specified the structure finder will not look for a primary timestamp in the text. +When the format is semi-structured text this will result in the structure finder treating the text as single-line messages. -### test_grok_pattern [_test_grok_pattern] +## client.textStructure.testGrokPattern [_text_structure.test_grok_pattern] +Test a Grok pattern. +Test a Grok pattern on one or more lines of text. +The API indicates whether the lines match the pattern together with the offsets and lengths of the matched substrings. -Test a Grok pattern. Test a Grok pattern on one or more lines of text. The API indicates whether the lines match the pattern together with the offsets and lengths of the matched substrings. 
- -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-text-structure-test-grok-pattern) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-text-structure-test-grok-pattern) ```ts client.textStructure.testGrokPattern({ grok_pattern, text }) ``` +### Arguments [_arguments_text_structure.test_grok_pattern] -### Arguments [_arguments_461] - -* **Request (object):** - - * **`grok_pattern` (string)**: The Grok pattern to run on the text. - * **`text` (string[])**: The lines of text to run the Grok pattern on. - * **`ecs_compatibility` (Optional, string)**: The mode of compatibility with ECS compliant Grok patterns. Use this parameter to specify whether to use ECS Grok patterns instead of legacy ones when the structure finder creates a Grok pattern. Valid values are `disabled` and `v1`. - - - -## transform [_transform] - - -### delete_transform [_delete_transform] +#### Request (object) [_request_text_structure.test_grok_pattern] +- **`grok_pattern` (string)**: The Grok pattern to run on the text. +- **`text` (string[])**: The lines of text to run the Grok pattern on. +- **`ecs_compatibility` (Optional, string)**: The mode of compatibility with ECS compliant Grok patterns. +Use this parameter to specify whether to use ECS Grok patterns instead of legacy ones when the structure finder creates a Grok pattern. +Valid values are `disabled` and `v1`. -Delete a transform. Deletes a transform. +## client.transform.deleteTransform [_transform.delete_transform] +Delete a transform. -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-transform-delete-transform) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-transform-delete-transform) ```ts client.transform.deleteTransform({ transform_id }) ``` +### Arguments [_arguments_transform.delete_transform] -### Arguments [_arguments_462] - -* **Request (object):** - - * **`transform_id` (string)**: Identifier for the transform. - * **`force` (Optional, boolean)**: If this value is false, the transform must be stopped before it can be deleted. If true, the transform is deleted regardless of its current state. - * **`delete_dest_index` (Optional, boolean)**: If this value is true, the destination index is deleted together with the transform. If false, the destination index will not be deleted - * **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. - - - -### get_node_stats [_get_node_stats] +#### Request (object) [_request_transform.delete_transform] +- **`transform_id` (string)**: Identifier for the transform. +- **`force` (Optional, boolean)**: If this value is false, the transform must be stopped before it can be deleted. If true, the transform is +deleted regardless of its current state. +- **`delete_dest_index` (Optional, boolean)**: If this value is true, the destination index is deleted together with the transform. If false, the destination +index will not be deleted +- **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. +## client.transform.getNodeStats [_transform.get_node_stats] Retrieves transform usage information for transform nodes. 
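A minimal usage sketch follows; it assumes `client` is an already-instantiated and authenticated `Client` from `@elastic/elasticsearch` (no request options are required for this call):

```ts
// Fetch transform usage information from the transform nodes in the cluster.
const nodeStats = await client.transform.getNodeStats()
console.log(nodeStats)
```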
+[Endpoint documentation](https://www.elastic.co/guide/en/elasticsearch/reference/current/get-transform-node-stats.html) + ```ts client.transform.getNodeStats() ``` -### get_transform [_get_transform] +## client.transform.getTransform [_transform.get_transform] +Get transforms. +Get configuration information for transforms. -Get transforms. Retrieves configuration information for transforms. - -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-transform-get-transform) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-transform-get-transform) ```ts client.transform.getTransform({ ... }) ``` +### Arguments [_arguments_transform.get_transform] -### Arguments [_arguments_463] - -* **Request (object):** +#### Request (object) [_request_transform.get_transform] +- **`transform_id` (Optional, string | string[])**: Identifier for the transform. It can be a transform identifier or a +wildcard expression. You can get information for all transforms by using +`_all`, by specifying `*` as the ``, or by omitting the +``. +- **`allow_no_match` (Optional, boolean)**: Specifies what to do when the request: - * **`transform_id` (Optional, string | string[])**: Identifier for the transform. It can be a transform identifier or a wildcard expression. You can get information for all transforms by using `_all`, by specifying `*` as the ``, or by omitting the ``. - * **`allow_no_match` (Optional, boolean)**: Specifies what to do when the request: +1. Contains wildcard expressions and there are no transforms that match. +2. Contains the _all string or no identifiers and there are no matches. +3. Contains wildcard expressions and there are only partial matches. - 1. Contains wildcard expressions and there are no transforms that match. - 2. Contains the _all string or no identifiers and there are no matches. - 3. Contains wildcard expressions and there are only partial matches. +If this parameter is false, the request returns a 404 status code when +there are no matches or only partial matches. +- **`from` (Optional, number)**: Skips the specified number of transforms. +- **`size` (Optional, number)**: Specifies the maximum number of transforms to obtain. +- **`exclude_generated` (Optional, boolean)**: Excludes fields that were automatically added when creating the +transform. This allows the configuration to be in an acceptable format to +be retrieved and then added to another cluster. +## client.transform.getTransformStats [_transform.get_transform_stats] +Get transform stats. -If this parameter is false, the request returns a 404 status code when there are no matches or only partial matches. ** *`from` (Optional, number)**: Skips the specified number of transforms. *** *`size` (Optional, number)**: Specifies the maximum number of transforms to obtain. ** *`exclude_generated` (Optional, boolean)**: Excludes fields that were automatically added when creating the transform. This allows the configuration to be in an acceptable format to be retrieved and then added to another cluster. +Get usage information for transforms. - -### get_transform_stats [_get_transform_stats] - -Get transform stats. Retrieves usage information for transforms. 
- -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-transform-get-transform-stats) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-transform-get-transform-stats) ```ts client.transform.getTransformStats({ transform_id }) ``` +### Arguments [_arguments_transform.get_transform_stats] -### Arguments [_arguments_464] - -* **Request (object):** - - * **`transform_id` (string | string[])**: Identifier for the transform. It can be a transform identifier or a wildcard expression. You can get information for all transforms by using `_all`, by specifying `*` as the ``, or by omitting the ``. - * **`allow_no_match` (Optional, boolean)**: Specifies what to do when the request: - - 1. Contains wildcard expressions and there are no transforms that match. - 2. Contains the _all string or no identifiers and there are no matches. - 3. Contains wildcard expressions and there are only partial matches. - - -If this parameter is false, the request returns a 404 status code when there are no matches or only partial matches. ** *`from` (Optional, number)**: Skips the specified number of transforms. *** *`size` (Optional, number)**: Specifies the maximum number of transforms to obtain. ** *`timeout` (Optional, string | -1 | 0)**: Controls the time to wait for the stats +#### Request (object) [_request_transform.get_transform_stats] +- **`transform_id` (string | string[])**: Identifier for the transform. It can be a transform identifier or a +wildcard expression. You can get information for all transforms by using +`_all`, by specifying `*` as the ``, or by omitting the +``. +- **`allow_no_match` (Optional, boolean)**: Specifies what to do when the request: +1. Contains wildcard expressions and there are no transforms that match. +2. Contains the _all string or no identifiers and there are no matches. +3. Contains wildcard expressions and there are only partial matches. -### preview_transform [_preview_transform] +If this parameter is false, the request returns a 404 status code when +there are no matches or only partial matches. +- **`from` (Optional, number)**: Skips the specified number of transforms. +- **`size` (Optional, number)**: Specifies the maximum number of transforms to obtain. +- **`timeout` (Optional, string | -1 | 0)**: Controls the time to wait for the stats -Preview a transform. Generates a preview of the results that you will get when you create a transform with the same configuration. +## client.transform.previewTransform [_transform.preview_transform] +Preview a transform. +Generates a preview of the results that you will get when you create a transform with the same configuration. -It returns a maximum of 100 results. The calculations are based on all the current data in the source index. It also generates a list of mappings and settings for the destination index. These values are determined based on the field types of the source index and the transform aggregations. +It returns a maximum of 100 results. The calculations are based on all the current data in the source index. It also +generates a list of mappings and settings for the destination index. These values are determined based on the field +types of the source index and the transform aggregations. 
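As an illustration, a pivot transform can be previewed before it is created. The connection details, index name, and field names in this sketch are hypothetical placeholders, not values taken from this repository:

```ts
import { Client } from '@elastic/elasticsearch'

// Hypothetical connection; authentication options are omitted for brevity.
const client = new Client({ node: 'http://localhost:9200/' })

// Preview a pivot transform without creating it: group documents by a
// customer identifier and sum an order-total field. The index and field
// names below are placeholders.
const preview = await client.transform.previewTransform({
  source: { index: 'my-orders' },
  pivot: {
    group_by: {
      customer_id: { terms: { field: 'customer_id' } }
    },
    aggregations: {
      total_spent: { sum: { field: 'order_total' } }
    }
  }
})

// The response contains up to 100 preview documents plus generated mappings
// and settings for the destination index.
console.log(preview)
```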
-[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-transform-preview-transform) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-transform-preview-transform) ```ts client.transform.previewTransform({ ... }) ``` - -### Arguments [_arguments_465] - -* **Request (object):** - - * **`transform_id` (Optional, string)**: Identifier for the transform to preview. If you specify this path parameter, you cannot provide transform configuration details in the request body. - * **`dest` (Optional, { index, op_type, pipeline, routing, version_type })**: The destination for the transform. - * **`description` (Optional, string)**: Free text description of the transform. - * **`frequency` (Optional, string | -1 | 0)**: The interval between checks for changes in the source indices when the transform is running continuously. Also determines the retry interval in the event of transient failures while the transform is searching or indexing. The minimum value is 1s and the maximum is 1h. - * **`pivot` (Optional, { aggregations, group_by })**: The pivot method transforms the data by aggregating and grouping it. These objects define the group by fields and the aggregation to reduce the data. - * **`source` (Optional, { index, query, remote, size, slice, sort, _source, runtime_mappings })**: The source of the data for the transform. - * **`settings` (Optional, { align_checkpoints, dates_as_epoch_millis, deduce_mappings, docs_per_second, max_page_search_size, unattended })**: Defines optional transform settings. - * **`sync` (Optional, { time })**: Defines the properties transforms require to run continuously. - * **`retention_policy` (Optional, { time })**: Defines a retention policy for the transform. Data that meets the defined criteria is deleted from the destination index. - * **`latest` (Optional, { sort, unique_key })**: The latest method transforms the data by finding the latest document for each unique key. - * **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. - - - -### put_transform [_put_transform] - -Create a transform. Creates a transform. - -A transform copies data from source indices, transforms it, and persists it into an entity-centric destination index. You can also think of the destination index as a two-dimensional tabular data structure (known as a data frame). The ID for each document in the data frame is generated from a hash of the entity, so there is a unique row per entity. - -You must choose either the latest or pivot method for your transform; you cannot use both in a single transform. If you choose to use the pivot method for your transform, the entities are defined by the set of `group_by` fields in the pivot object. If you choose to use the latest method, the entities are defined by the `unique_key` field values in the latest object. - -You must have `create_index`, `index`, and `read` privileges on the destination index and `read` and `view_index_metadata` privileges on the source indices. When Elasticsearch security features are enabled, the transform remembers which roles the user that created it had at the time of creation and uses those same roles. If those roles do not have the required privileges on the source and destination indices, the transform fails when it attempts unauthorized operations. - -::::{note} -You must use Kibana or this API to create a transform. 
Do not add a transform directly into any `.transform-internal*` indices using the Elasticsearch index API. If Elasticsearch security features are enabled, do not give users any privileges on `.transform-internal*` indices. If you used transforms prior to 7.5, also do not give users any privileges on `.data-frame-internal*` indices. -:::: - - -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-transform-put-transform) +### Arguments [_arguments_transform.preview_transform] + +#### Request (object) [_request_transform.preview_transform] +- **`transform_id` (Optional, string)**: Identifier for the transform to preview. If you specify this path parameter, you cannot provide transform +configuration details in the request body. +- **`dest` (Optional, { index, op_type, pipeline, routing, version_type })**: The destination for the transform. +- **`description` (Optional, string)**: Free text description of the transform. +- **`frequency` (Optional, string | -1 | 0)**: The interval between checks for changes in the source indices when the +transform is running continuously. Also determines the retry interval in +the event of transient failures while the transform is searching or +indexing. The minimum value is 1s and the maximum is 1h. +- **`pivot` (Optional, { aggregations, group_by })**: The pivot method transforms the data by aggregating and grouping it. +These objects define the group by fields and the aggregation to reduce +the data. +- **`source` (Optional, { index, query, remote, size, slice, sort, _source, runtime_mappings })**: The source of the data for the transform. +- **`settings` (Optional, { align_checkpoints, dates_as_epoch_millis, deduce_mappings, docs_per_second, max_page_search_size, unattended })**: Defines optional transform settings. +- **`sync` (Optional, { time })**: Defines the properties transforms require to run continuously. +- **`retention_policy` (Optional, { time })**: Defines a retention policy for the transform. Data that meets the defined +criteria is deleted from the destination index. +- **`latest` (Optional, { sort, unique_key })**: The latest method transforms the data by finding the latest document for +each unique key. +- **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the +timeout expires, the request fails and returns an error. + +## client.transform.putTransform [_transform.put_transform] +Create a transform. +Creates a transform. + +A transform copies data from source indices, transforms it, and persists it into an entity-centric destination index. You can also think of the destination index as a two-dimensional tabular data structure (known as +a data frame). The ID for each document in the data frame is generated from a hash of the entity, so there is a +unique row per entity. + +You must choose either the latest or pivot method for your transform; you cannot use both in a single transform. If +you choose to use the pivot method for your transform, the entities are defined by the set of `group_by` fields in +the pivot object. If you choose to use the latest method, the entities are defined by the `unique_key` field values +in the latest object. + +You must have `create_index`, `index`, and `read` privileges on the destination index and `read` and +`view_index_metadata` privileges on the source indices. 
When Elasticsearch security features are enabled, the +transform remembers which roles the user that created it had at the time of creation and uses those same roles. If +those roles do not have the required privileges on the source and destination indices, the transform fails when it +attempts unauthorized operations. + +NOTE: You must use Kibana or this API to create a transform. Do not add a transform directly into any +`.transform-internal*` indices using the Elasticsearch index API. If Elasticsearch security features are enabled, do +not give users any privileges on `.transform-internal*` indices. If you used transforms prior to 7.5, also do not +give users any privileges on `.data-frame-internal*` indices. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-transform-put-transform) ```ts client.transform.putTransform({ transform_id, dest, source }) ``` - -### Arguments [_arguments_466] - -* **Request (object):** - - * **`transform_id` (string)**: Identifier for the transform. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. It has a 64 character limit and must start and end with alphanumeric characters. - * **`dest` ({ index, op_type, pipeline, routing, version_type })**: The destination for the transform. - * **`source` ({ index, query, remote, size, slice, sort, _source, runtime_mappings })**: The source of the data for the transform. - * **`description` (Optional, string)**: Free text description of the transform. - * **`frequency` (Optional, string | -1 | 0)**: The interval between checks for changes in the source indices when the transform is running continuously. Also determines the retry interval in the event of transient failures while the transform is searching or indexing. The minimum value is `1s` and the maximum is `1h`. - * **`latest` (Optional, { sort, unique_key })**: The latest method transforms the data by finding the latest document for each unique key. - * **`_meta` (Optional, Record)**: Defines optional transform metadata. - * **`pivot` (Optional, { aggregations, group_by })**: The pivot method transforms the data by aggregating and grouping it. These objects define the group by fields and the aggregation to reduce the data. - * **`retention_policy` (Optional, { time })**: Defines a retention policy for the transform. Data that meets the defined criteria is deleted from the destination index. - * **`settings` (Optional, { align_checkpoints, dates_as_epoch_millis, deduce_mappings, docs_per_second, max_page_search_size, unattended })**: Defines optional transform settings. - * **`sync` (Optional, { time })**: Defines the properties transforms require to run continuously. - * **`defer_validation` (Optional, boolean)**: When the transform is created, a series of validations occur to ensure its success. For example, there is a check for the existence of the source indices and a check that the destination index is not part of the source index pattern. You can use this parameter to skip the checks, for example when the source index does not exist until after the transform is created. The validations are always run when you start the transform, however, with the exception of privilege checks. - * **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. - - - -### reset_transform [_reset_transform] - -Reset a transform. Resets a transform. 
Before you can reset it, you must stop it; alternatively, use the `force` query parameter. If the destination index was created by the transform, it is deleted. - -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-transform-reset-transform) +### Arguments [_arguments_transform.put_transform] + +#### Request (object) [_request_transform.put_transform] +- **`transform_id` (string)**: Identifier for the transform. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), +hyphens, and underscores. It has a 64 character limit and must start and end with alphanumeric characters. +- **`dest` ({ index, op_type, pipeline, routing, version_type })**: The destination for the transform. +- **`source` ({ index, query, remote, size, slice, sort, _source, runtime_mappings })**: The source of the data for the transform. +- **`description` (Optional, string)**: Free text description of the transform. +- **`frequency` (Optional, string | -1 | 0)**: The interval between checks for changes in the source indices when the transform is running continuously. Also +determines the retry interval in the event of transient failures while the transform is searching or indexing. +The minimum value is `1s` and the maximum is `1h`. +- **`latest` (Optional, { sort, unique_key })**: The latest method transforms the data by finding the latest document for each unique key. +- **`_meta` (Optional, Record)**: Defines optional transform metadata. +- **`pivot` (Optional, { aggregations, group_by })**: The pivot method transforms the data by aggregating and grouping it. These objects define the group by fields +and the aggregation to reduce the data. +- **`retention_policy` (Optional, { time })**: Defines a retention policy for the transform. Data that meets the defined criteria is deleted from the +destination index. +- **`settings` (Optional, { align_checkpoints, dates_as_epoch_millis, deduce_mappings, docs_per_second, max_page_search_size, unattended })**: Defines optional transform settings. +- **`sync` (Optional, { time })**: Defines the properties transforms require to run continuously. +- **`defer_validation` (Optional, boolean)**: When the transform is created, a series of validations occur to ensure its success. For example, there is a +check for the existence of the source indices and a check that the destination index is not part of the source +index pattern. You can use this parameter to skip the checks, for example when the source index does not exist +until after the transform is created. The validations are always run when you start the transform, however, with +the exception of privilege checks. +- **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. + +## client.transform.resetTransform [_transform.reset_transform] +Reset a transform. + +Before you can reset it, you must stop it; alternatively, use the `force` query parameter. +If the destination index was created by the transform, it is deleted. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-transform-reset-transform) ```ts client.transform.resetTransform({ transform_id }) ``` +### Arguments [_arguments_transform.reset_transform] -### Arguments [_arguments_467] - -* **Request (object):** - - * **`transform_id` (string)**: Identifier for the transform. 
This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. It has a 64 character limit and must start and end with alphanumeric characters. - * **`force` (Optional, boolean)**: If this value is `true`, the transform is reset regardless of its current state. If it’s `false`, the transform must be stopped before it can be reset. - * **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. - +#### Request (object) [_request_transform.reset_transform] +- **`transform_id` (string)**: Identifier for the transform. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), +hyphens, and underscores. It has a 64 character limit and must start and end with alphanumeric characters. +- **`force` (Optional, boolean)**: If this value is `true`, the transform is reset regardless of its current state. If it's `false`, the transform +must be stopped before it can be reset. +- **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. +## client.transform.scheduleNowTransform [_transform.schedule_now_transform] +Schedule a transform to start now. -### schedule_now_transform [_schedule_now_transform] +Instantly run a transform to process data. +If you run this API, the transform will process the new data instantly, +without waiting for the configured frequency interval. After the API is called, +the transform will be processed again at `now + frequency` unless the API +is called again in the meantime. -Schedule a transform to start now. Instantly runs a transform to process data. - -If you _schedule_now a transform, it will process the new data instantly, without waiting for the configured frequency interval. After _schedule_now API is called, the transform will be processed again at now + frequency unless _schedule_now API is called again in the meantime. - -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-transform-schedule-now-transform) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-transform-schedule-now-transform) ```ts client.transform.scheduleNowTransform({ transform_id }) ``` +### Arguments [_arguments_transform.schedule_now_transform] -### Arguments [_arguments_468] - -* **Request (object):** - - * **`transform_id` (string)**: Identifier for the transform. - * **`timeout` (Optional, string | -1 | 0)**: Controls the time to wait for the scheduling to take place +#### Request (object) [_request_transform.schedule_now_transform] +- **`transform_id` (string)**: Identifier for the transform. +- **`timeout` (Optional, string | -1 | 0)**: Controls the time to wait for the scheduling to take place +## client.transform.startTransform [_transform.start_transform] +Start a transform. +When you start a transform, it creates the destination index if it does not already exist. The `number_of_shards` is +set to `1` and the `auto_expand_replicas` is set to `0-1`. If it is a pivot transform, it deduces the mapping +definitions for the destination index from the source indices and the transform aggregations. If fields in the +destination index are derived from scripts (as in the case of `scripted_metric` or `bucket_script` aggregations), +the transform uses dynamic mappings unless an index template exists. 
If it is a latest transform, it does not deduce +mapping definitions; it uses dynamic mappings. To use explicit mappings, create the destination index before you +start the transform. Alternatively, you can create an index template, though it does not affect the deduced mappings +in a pivot transform. -### start_transform [_start_transform] +When the transform starts, a series of validations occur to ensure its success. If you deferred validation when you +created the transform, they occur when you start the transform—​with the exception of privilege checks. When +Elasticsearch security features are enabled, the transform remembers which roles the user that created it had at the +time of creation and uses those same roles. If those roles do not have the required privileges on the source and +destination indices, the transform fails when it attempts unauthorized operations. -Start a transform. Starts a transform. - -When you start a transform, it creates the destination index if it does not already exist. The `number_of_shards` is set to `1` and the `auto_expand_replicas` is set to `0-1`. If it is a pivot transform, it deduces the mapping definitions for the destination index from the source indices and the transform aggregations. If fields in the destination index are derived from scripts (as in the case of `scripted_metric` or `bucket_script` aggregations), the transform uses dynamic mappings unless an index template exists. If it is a latest transform, it does not deduce mapping definitions; it uses dynamic mappings. To use explicit mappings, create the destination index before you start the transform. Alternatively, you can create an index template, though it does not affect the deduced mappings in a pivot transform. - -When the transform starts, a series of validations occur to ensure its success. If you deferred validation when you created the transform, they occur when you start the transform—​with the exception of privilege checks. When Elasticsearch security features are enabled, the transform remembers which roles the user that created it had at the time of creation and uses those same roles. If those roles do not have the required privileges on the source and destination indices, the transform fails when it attempts unauthorized operations. - -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-transform-start-transform) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-transform-start-transform) ```ts client.transform.startTransform({ transform_id }) ``` +### Arguments [_arguments_transform.start_transform] -### Arguments [_arguments_469] - -* **Request (object):** +#### Request (object) [_request_transform.start_transform] +- **`transform_id` (string)**: Identifier for the transform. +- **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. +- **`from` (Optional, string)**: Restricts the set of transformed entities to those changed after this time. Relative times like now-30d are supported. Only applicable for continuous transforms. - * **`transform_id` (string)**: Identifier for the transform. - * **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. - * **`from` (Optional, string)**: Restricts the set of transformed entities to those changed after this time. 
Relative times like now-30d are supported. Only applicable for continuous transforms. +## client.transform.stopTransform [_transform.stop_transform] +Stop transforms. +Stops one or more transforms. - - -### stop_transform [_stop_transform] - -Stop transforms. Stops one or more transforms. - -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-transform-stop-transform) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-transform-stop-transform) ```ts client.transform.stopTransform({ transform_id }) ``` +### Arguments [_arguments_transform.stop_transform] -### Arguments [_arguments_470] - -* **Request (object):** - - * **`transform_id` (string)**: Identifier for the transform. To stop multiple transforms, use a list or a wildcard expression. To stop all transforms, use `_all` or `*` as the identifier. - * **`allow_no_match` (Optional, boolean)**: Specifies what to do when the request: contains wildcard expressions and there are no transforms that match; contains the `_all` string or no identifiers and there are no matches; contains wildcard expressions and there are only partial matches. - - -If it is true, the API returns a successful acknowledgement message when there are no matches. When there are only partial matches, the API stops the appropriate transforms. +#### Request (object) [_request_transform.stop_transform] +- **`transform_id` (string)**: Identifier for the transform. To stop multiple transforms, use a list or a wildcard expression. +To stop all transforms, use `_all` or `*` as the identifier. +- **`allow_no_match` (Optional, boolean)**: Specifies what to do when the request: contains wildcard expressions and there are no transforms that match; +contains the `_all` string or no identifiers and there are no matches; contains wildcard expressions and there +are only partial matches. -If it is false, the request returns a 404 status code when there are no matches or only partial matches. ** *`force` (Optional, boolean)**: If it is true, the API forcefully stops the transforms. *** *`timeout` (Optional, string | -1 | 0)**: Period to wait for a response when `wait_for_completion` is `true`. If no response is received before the timeout expires, the request returns a timeout exception. However, the request continues processing and eventually moves the transform to a STOPPED state. *** *`wait_for_checkpoint` (Optional, boolean)**: If it is true, the transform does not completely stop until the current checkpoint is completed. If it is false, the transform stops as soon as possible. ** *`wait_for_completion` (Optional, boolean)**: If it is true, the API blocks until the indexer state completely stops. If it is false, the API returns immediately and the indexer is stopped asynchronously in the background. +If it is true, the API returns a successful acknowledgement message when there are no matches. When there are +only partial matches, the API stops the appropriate transforms. +If it is false, the request returns a 404 status code when there are no matches or only partial matches. +- **`force` (Optional, boolean)**: If it is true, the API forcefully stops the transforms. +- **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response when `wait_for_completion` is `true`. If no response is received before the +timeout expires, the request returns a timeout exception. However, the request continues processing and +eventually moves the transform to a STOPPED state. 
+- **`wait_for_checkpoint` (Optional, boolean)**: If it is true, the transform does not completely stop until the current checkpoint is completed. If it is false, +the transform stops as soon as possible. +- **`wait_for_completion` (Optional, boolean)**: If it is true, the API blocks until the indexer state completely stops. If it is false, the API returns +immediately and the indexer is stopped asynchronously in the background. -### update_transform [_update_transform] +## client.transform.updateTransform [_transform.update_transform] +Update a transform. +Updates certain properties of a transform. -Update a transform. Updates certain properties of a transform. +All updated properties except `description` do not take effect until after the transform starts the next checkpoint, +thus there is data consistency in each checkpoint. To use this API, you must have `read` and `view_index_metadata` +privileges for the source indices. You must also have `index` and `read` privileges for the destination index. When +Elasticsearch security features are enabled, the transform remembers which roles the user who updated it had at the +time of update and runs with those privileges. -All updated properties except `description` do not take effect until after the transform starts the next checkpoint, thus there is data consistency in each checkpoint. To use this API, you must have `read` and `view_index_metadata` privileges for the source indices. You must also have `index` and `read` privileges for the destination index. When Elasticsearch security features are enabled, the transform remembers which roles the user who updated it had at the time of update and runs with those privileges. - -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-transform-update-transform) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-transform-update-transform) ```ts client.transform.updateTransform({ transform_id }) ``` - -### Arguments [_arguments_471] - -* **Request (object):** - - * **`transform_id` (string)**: Identifier for the transform. - * **`dest` (Optional, { index, op_type, pipeline, routing, version_type })**: The destination for the transform. - * **`description` (Optional, string)**: Free text description of the transform. - * **`frequency` (Optional, string | -1 | 0)**: The interval between checks for changes in the source indices when the transform is running continuously. Also determines the retry interval in the event of transient failures while the transform is searching or indexing. The minimum value is 1s and the maximum is 1h. - * **`_meta` (Optional, Record)**: Defines optional transform metadata. - * **`source` (Optional, { index, query, remote, size, slice, sort, _source, runtime_mappings })**: The source of the data for the transform. - * **`settings` (Optional, { align_checkpoints, dates_as_epoch_millis, deduce_mappings, docs_per_second, max_page_search_size, unattended })**: Defines optional transform settings. - * **`sync` (Optional, { time })**: Defines the properties transforms require to run continuously. - * **`retention_policy` (Optional, { time } | null)**: Defines a retention policy for the transform. Data that meets the defined criteria is deleted from the destination index. - * **`defer_validation` (Optional, boolean)**: When true, deferrable validations are not run. This behavior may be desired if the source index does not exist until after the transform is created. 
- * **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. - - - -### upgrade_transforms [_upgrade_transforms] - -Upgrade all transforms. Transforms are compatible across minor versions and between supported major versions. However, over time, the format of transform configuration information may change. This API identifies transforms that have a legacy configuration format and upgrades them to the latest version. It also cleans up the internal data structures that store the transform state and checkpoints. The upgrade does not affect the source and destination indices. The upgrade also does not affect the roles that transforms use when Elasticsearch security features are enabled; the role used to read source data and write to the destination index remains unchanged. - -If a transform upgrade step fails, the upgrade stops and an error is returned about the underlying issue. Resolve the issue then re-run the process again. A summary is returned when the upgrade is finished. - -To ensure continuous transforms remain running during a major version upgrade of the cluster – for example, from 7.16 to 8.0 – it is recommended to upgrade transforms before upgrading the cluster. You may want to perform a recent cluster backup prior to the upgrade. - -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-transform-upgrade-transforms) +### Arguments [_arguments_transform.update_transform] + +#### Request (object) [_request_transform.update_transform] +- **`transform_id` (string)**: Identifier for the transform. +- **`dest` (Optional, { index, op_type, pipeline, routing, version_type })**: The destination for the transform. +- **`description` (Optional, string)**: Free text description of the transform. +- **`frequency` (Optional, string | -1 | 0)**: The interval between checks for changes in the source indices when the +transform is running continuously. Also determines the retry interval in +the event of transient failures while the transform is searching or +indexing. The minimum value is 1s and the maximum is 1h. +- **`_meta` (Optional, Record)**: Defines optional transform metadata. +- **`source` (Optional, { index, query, remote, size, slice, sort, _source, runtime_mappings })**: The source of the data for the transform. +- **`settings` (Optional, { align_checkpoints, dates_as_epoch_millis, deduce_mappings, docs_per_second, max_page_search_size, unattended })**: Defines optional transform settings. +- **`sync` (Optional, { time })**: Defines the properties transforms require to run continuously. +- **`retention_policy` (Optional, { time } | null)**: Defines a retention policy for the transform. Data that meets the defined +criteria is deleted from the destination index. +- **`defer_validation` (Optional, boolean)**: When true, deferrable validations are not run. This behavior may be +desired if the source index does not exist until after the transform is +created. +- **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the +timeout expires, the request fails and returns an error. + +## client.transform.upgradeTransforms [_transform.upgrade_transforms] +Upgrade all transforms. + +Transforms are compatible across minor versions and between supported major versions. +However, over time, the format of transform configuration information may change. 
+This API identifies transforms that have a legacy configuration format and upgrades them to the latest version. +It also cleans up the internal data structures that store the transform state and checkpoints. +The upgrade does not affect the source and destination indices. +The upgrade also does not affect the roles that transforms use when Elasticsearch security features are enabled; the role used to read source data and write to the destination index remains unchanged. + +If a transform upgrade step fails, the upgrade stops and an error is returned about the underlying issue. +Resolve the issue then re-run the process again. +A summary is returned when the upgrade is finished. + +To ensure continuous transforms remain running during a major version upgrade of the cluster – for example, from 7.16 to 8.0 – it is recommended to upgrade transforms before upgrading the cluster. +You may want to perform a recent cluster backup prior to the upgrade. + +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-transform-upgrade-transforms) ```ts client.transform.upgradeTransforms({ ... }) ``` +### Arguments [_arguments_transform.upgrade_transforms] -### Arguments [_arguments_472] - -* **Request (object):** - - * **`dry_run` (Optional, boolean)**: When true, the request checks for updates but does not run them. - * **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. - - - -## watcher [_watcher] +#### Request (object) [_request_transform.upgrade_transforms] +- **`dry_run` (Optional, boolean)**: When true, the request checks for updates but does not run them. +- **`timeout` (Optional, string | -1 | 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and +returns an error. - -### ack_watch [_ack_watch] - -Acknowledge a watch. Acknowledging a watch enables you to manually throttle the execution of the watch’s actions. +## client.watcher.ackWatch [_watcher.ack_watch] +Acknowledge a watch. +Acknowledging a watch enables you to manually throttle the execution of the watch's actions. The acknowledgement state of an action is stored in the `status.actions..ack.state` structure. -::::{important} -If the specified watch is currently being executed, this API will return an error The reason for this behavior is to prevent overwriting the watch status from a watch execution. -:::: +IMPORTANT: If the specified watch is currently being executed, this API will return an error +The reason for this behavior is to prevent overwriting the watch status from a watch execution. +Acknowledging an action throttles further executions of that action until its `ack.state` is reset to `awaits_successful_execution`. +This happens when the condition of the watch is not met (the condition evaluates to false). -Acknowledging an action throttles further executions of that action until its `ack.state` is reset to `awaits_successful_execution`. This happens when the condition of the watch is not met (the condition evaluates to false). 
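For illustration, a minimal acknowledgement call might look like the following sketch; the watch and action identifiers (`my_watch`, `email_admin`) are hypothetical:

```ts
// Acknowledge one action of an existing watch to throttle further executions.
// Omit action_id to acknowledge every action of the watch.
const response = await client.watcher.ackWatch({
  watch_id: 'my_watch',    // hypothetical watch id
  action_id: 'email_admin' // hypothetical action id
})
// The acknowledgement state is reported under status.actions.<action_id>.ack.state
console.log(response.status.actions)
```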
- -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-ack-watch) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-watcher-ack-watch) ```ts client.watcher.ackWatch({ watch_id }) ``` +### Arguments [_arguments_watcher.ack_watch] -### Arguments [_arguments_473] - -* **Request (object):** - - * **`watch_id` (string)**: The watch identifier. - * **`action_id` (Optional, string | string[])**: A list of the action identifiers to acknowledge. If you omit this parameter, all of the actions of the watch are acknowledged. - - - -### activate_watch [_activate_watch] +#### Request (object) [_request_watcher.ack_watch] +- **`watch_id` (string)**: The watch identifier. +- **`action_id` (Optional, string | string[])**: A list of the action identifiers to acknowledge. +If you omit this parameter, all of the actions of the watch are acknowledged. -Activate a watch. A watch can be either active or inactive. +## client.watcher.activateWatch [_watcher.activate_watch] +Activate a watch. +A watch can be either active or inactive. -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-activate-watch) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-watcher-activate-watch) ```ts client.watcher.activateWatch({ watch_id }) ``` +### Arguments [_arguments_watcher.activate_watch] -### Arguments [_arguments_474] +#### Request (object) [_request_watcher.activate_watch] +- **`watch_id` (string)**: The watch identifier. -* **Request (object):** +## client.watcher.deactivateWatch [_watcher.deactivate_watch] +Deactivate a watch. +A watch can be either active or inactive. - * **`watch_id` (string)**: The watch identifier. - - - -### deactivate_watch [_deactivate_watch] - -Deactivate a watch. A watch can be either active or inactive. - -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-deactivate-watch) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-watcher-deactivate-watch) ```ts client.watcher.deactivateWatch({ watch_id }) ``` +### Arguments [_arguments_watcher.deactivate_watch] -### Arguments [_arguments_475] - -* **Request (object):** - - * **`watch_id` (string)**: The watch identifier. - +#### Request (object) [_request_watcher.deactivate_watch] +- **`watch_id` (string)**: The watch identifier. - -### delete_watch [_delete_watch] - -Delete a watch. When the watch is removed, the document representing the watch in the `.watches` index is gone and it will never be run again. +## client.watcher.deleteWatch [_watcher.delete_watch] +Delete a watch. +When the watch is removed, the document representing the watch in the `.watches` index is gone and it will never be run again. Deleting a watch does not delete any watch execution records related to this watch from the watch history. -::::{important} -Deleting a watch must be done by using only this API. Do not delete the watch directly from the `.watches` index using the Elasticsearch delete document API When Elasticsearch security features are enabled, make sure no write privileges are granted to anyone for the `.watches` index. -:::: +IMPORTANT: Deleting a watch must be done by using only this API. 
+Do not delete the watch directly from the `.watches` index using the Elasticsearch delete document API +When Elasticsearch security features are enabled, make sure no write privileges are granted to anyone for the `.watches` index. - -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-delete-watch) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-watcher-delete-watch) ```ts client.watcher.deleteWatch({ id }) ``` +### Arguments [_arguments_watcher.delete_watch] -### Arguments [_arguments_476] - -* **Request (object):** - - * **`id` (string)**: The watch identifier. - - - -### execute_watch [_execute_watch] +#### Request (object) [_request_watcher.delete_watch] +- **`id` (string)**: The watch identifier. -Run a watch. This API can be used to force execution of the watch outside of its triggering logic or to simulate the watch execution for debugging purposes. +## client.watcher.executeWatch [_watcher.execute_watch] +Run a watch. +This API can be used to force execution of the watch outside of its triggering logic or to simulate the watch execution for debugging purposes. -For testing and debugging purposes, you also have fine-grained control on how the watch runs. You can run the watch without running all of its actions or alternatively by simulating them. You can also force execution by ignoring the watch condition and control whether a watch record would be written to the watch history after it runs. +For testing and debugging purposes, you also have fine-grained control on how the watch runs. +You can run the watch without running all of its actions or alternatively by simulating them. +You can also force execution by ignoring the watch condition and control whether a watch record would be written to the watch history after it runs. -You can use the run watch API to run watches that are not yet registered by specifying the watch definition inline. This serves as great tool for testing and debugging your watches prior to adding them to Watcher. +You can use the run watch API to run watches that are not yet registered by specifying the watch definition inline. +This serves as great tool for testing and debugging your watches prior to adding them to Watcher. -When Elasticsearch security features are enabled on your cluster, watches are run with the privileges of the user that stored the watches. If your user is allowed to read index `a`, but not index `b`, then the exact same set of rules will apply during execution of a watch. +When Elasticsearch security features are enabled on your cluster, watches are run with the privileges of the user that stored the watches. +If your user is allowed to read index `a`, but not index `b`, then the exact same set of rules will apply during execution of a watch. When using the run watch API, the authorization data of the user that called the API will be used as a base, instead of the information who stored the watch. -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-execute-watch) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-watcher-execute-watch) ```ts client.watcher.executeWatch({ ... }) ``` +### Arguments [_arguments_watcher.execute_watch] -### Arguments [_arguments_477] +#### Request (object) [_request_watcher.execute_watch] +- **`id` (Optional, string)**: The watch identifier. 
+- **`action_modes` (Optional, Record)**: Determines how to handle the watch actions as part of the watch execution. +- **`alternative_input` (Optional, Record)**: When present, the watch uses this object as a payload instead of executing its own input. +- **`ignore_condition` (Optional, boolean)**: When set to `true`, the watch execution uses the always condition. This can also be specified as an HTTP parameter. +- **`record_execution` (Optional, boolean)**: When set to `true`, the watch record representing the watch execution result is persisted to the `.watcher-history` index for the current time. +In addition, the status of the watch is updated, possibly throttling subsequent runs. +This can also be specified as an HTTP parameter. +- **`simulated_actions` (Optional, { actions, all, use_all })** +- **`trigger_data` (Optional, { scheduled_time, triggered_time })**: This structure is parsed as the data of the trigger event that will be used during the watch execution. +- **`watch` (Optional, { actions, condition, input, metadata, status, throttle_period, throttle_period_in_millis, transform, trigger })**: When present, this watch is used instead of the one specified in the request. +This watch is not persisted to the index and `record_execution` cannot be set. +- **`debug` (Optional, boolean)**: Defines whether the watch runs in debug mode. -* **Request (object):** +## client.watcher.getSettings [_watcher.get_settings] +Get Watcher index settings. +Get settings for the Watcher internal index (`.watches`). +Only a subset of settings are shown, for example `index.auto_expand_replicas` and `index.number_of_replicas`. - * **`id` (Optional, string)**: The watch identifier. - * **`action_modes` (Optional, Record)**: Determines how to handle the watch actions as part of the watch execution. - * **`alternative_input` (Optional, Record)**: When present, the watch uses this object as a payload instead of executing its own input. - * **`ignore_condition` (Optional, boolean)**: When set to `true`, the watch execution uses the always condition. This can also be specified as an HTTP parameter. - * **`record_execution` (Optional, boolean)**: When set to `true`, the watch record representing the watch execution result is persisted to the `.watcher-history` index for the current time. In addition, the status of the watch is updated, possibly throttling subsequent runs. This can also be specified as an HTTP parameter. - * **`simulated_actions` (Optional, { actions, all, use_all })** - * **`trigger_data` (Optional, { scheduled_time, triggered_time })**: This structure is parsed as the data of the trigger event that will be used during the watch execution. - * **`watch` (Optional, { actions, condition, input, metadata, status, throttle_period, throttle_period_in_millis, transform, trigger })**: When present, this watch is used instead of the one specified in the request. This watch is not persisted to the index and `record_execution` cannot be set. - * **`debug` (Optional, boolean)**: Defines whether the watch runs in debug mode. - - - -### get_settings [_get_settings_4] - -Get Watcher index settings. Get settings for the Watcher internal index (`.watches`). Only a subset of settings are shown, for example `index.auto_expand_replicas` and `index.number_of_replicas`. 
- -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-get-settings) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-watcher-get-settings) ```ts client.watcher.getSettings({ ... }) ``` +### Arguments [_arguments_watcher.get_settings] -### Arguments [_arguments_478] - -* **Request (object):** - - * **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. - - - -### get_watch [_get_watch] +#### Request (object) [_request_watcher.get_settings] +- **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. +## client.watcher.getWatch [_watcher.get_watch] Get a watch. -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-get-watch) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-watcher-get-watch) ```ts client.watcher.getWatch({ id }) ``` +### Arguments [_arguments_watcher.get_watch] -### Arguments [_arguments_479] - -* **Request (object):** - - * **`id` (string)**: The watch identifier. - - - -### put_watch [_put_watch] - -Create or update a watch. When a watch is registered, a new document that represents the watch is added to the `.watches` index and its trigger is immediately registered with the relevant trigger engine. Typically for the `schedule` trigger, the scheduler is the trigger engine. +#### Request (object) [_request_watcher.get_watch] +- **`id` (string)**: The watch identifier. -::::{important} -You must use Kibana or this API to create a watch. Do not add a watch directly to the `.watches` index by using the Elasticsearch index API. If Elasticsearch security features are enabled, do not give users write privileges on the `.watches` index. -:::: +## client.watcher.putWatch [_watcher.put_watch] +Create or update a watch. +When a watch is registered, a new document that represents the watch is added to the `.watches` index and its trigger is immediately registered with the relevant trigger engine. +Typically for the `schedule` trigger, the scheduler is the trigger engine. +IMPORTANT: You must use Kibana or this API to create a watch. +Do not add a watch directly to the `.watches` index by using the Elasticsearch index API. +If Elasticsearch security features are enabled, do not give users write privileges on the `.watches` index. -When you add a watch you can also define its initial active state by setting the **active** parameter. +When you add a watch you can also define its initial active state by setting the *active* parameter. -When Elasticsearch security features are enabled, your watch can index or search only on indices for which the user that stored the watch has privileges. If the user is able to read index `a`, but not index `b`, the same will apply when the watch runs. +When Elasticsearch security features are enabled, your watch can index or search only on indices for which the user that stored the watch has privileges. +If the user is able to read index `a`, but not index `b`, the same will apply when the watch runs. 
-[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-put-watch) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-watcher-put-watch) ```ts client.watcher.putWatch({ id }) ``` - -### Arguments [_arguments_480] - -* **Request (object):** - - * **`id` (string)**: The identifier for the watch. - * **`actions` (Optional, Record)**: The list of actions that will be run if the condition matches. - * **`condition` (Optional, { always, array_compare, compare, never, script })**: The condition that defines if the actions should be run. - * **`input` (Optional, { chain, http, search, simple })**: The input that defines the input that loads the data for the watch. - * **`metadata` (Optional, Record)**: Metadata JSON that will be copied into the history entries. - * **`throttle_period` (Optional, string | -1 | 0)**: The minimum time between actions being run. The default is 5 seconds. This default can be changed in the config file with the setting `xpack.watcher.throttle.period.default_period`. If both this value and the `throttle_period_in_millis` parameter are specified, Watcher uses the last parameter included in the request. - * **`throttle_period_in_millis` (Optional, Unit)**: Minimum time in milliseconds between actions being run. Defaults to 5000. If both this value and the throttle_period parameter are specified, Watcher uses the last parameter included in the request. - * **`transform` (Optional, { chain, script, search })**: The transform that processes the watch payload to prepare it for the watch actions. - * **`trigger` (Optional, { schedule })**: The trigger that defines when the watch should run. - * **`active` (Optional, boolean)**: The initial state of the watch. The default value is `true`, which means the watch is active by default. - * **`if_primary_term` (Optional, number)**: only update the watch if the last operation that has changed the watch has the specified primary term - * **`if_seq_no` (Optional, number)**: only update the watch if the last operation that has changed the watch has the specified sequence number - * **`version` (Optional, number)**: Explicit version number for concurrency control - - - -### query_watches [_query_watches] - -Query watches. Get all registered watches in a paginated manner and optionally filter watches by a query. +### Arguments [_arguments_watcher.put_watch] + +#### Request (object) [_request_watcher.put_watch] +- **`id` (string)**: The identifier for the watch. +- **`actions` (Optional, Record)**: The list of actions that will be run if the condition matches. +- **`condition` (Optional, { always, array_compare, compare, never, script })**: The condition that defines if the actions should be run. +- **`input` (Optional, { chain, http, search, simple })**: The input that defines the input that loads the data for the watch. +- **`metadata` (Optional, Record)**: Metadata JSON that will be copied into the history entries. +- **`throttle_period` (Optional, string | -1 | 0)**: The minimum time between actions being run. +The default is 5 seconds. +This default can be changed in the config file with the setting `xpack.watcher.throttle.period.default_period`. +If both this value and the `throttle_period_in_millis` parameter are specified, Watcher uses the last parameter included in the request. +- **`throttle_period_in_millis` (Optional, Unit)**: Minimum time in milliseconds between actions being run. Defaults to 5000. 
If both this value and the throttle_period parameter are specified, Watcher uses the last parameter included in the request. +- **`transform` (Optional, { chain, script, search })**: The transform that processes the watch payload to prepare it for the watch actions. +- **`trigger` (Optional, { schedule })**: The trigger that defines when the watch should run. +- **`active` (Optional, boolean)**: The initial state of the watch. +The default value is `true`, which means the watch is active by default. +- **`if_primary_term` (Optional, number)**: only update the watch if the last operation that has changed the watch has the specified primary term +- **`if_seq_no` (Optional, number)**: only update the watch if the last operation that has changed the watch has the specified sequence number +- **`version` (Optional, number)**: Explicit version number for concurrency control + +## client.watcher.queryWatches [_watcher.query_watches] +Query watches. +Get all registered watches in a paginated manner and optionally filter watches by a query. Note that only the `_id` and `metadata.*` fields are queryable or sortable. -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-query-watches) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-watcher-query-watches) ```ts client.watcher.queryWatches({ ... }) ``` +### Arguments [_arguments_watcher.query_watches] -### Arguments [_arguments_481] - -* **Request (object):** +#### Request (object) [_request_watcher.query_watches] +- **`from` (Optional, number)**: The offset from the first result to fetch. +It must be non-negative. +- **`size` (Optional, number)**: The number of hits to return. +It must be non-negative. +- **`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_grid, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })**: A query that filters the watches to be returned. +- **`sort` (Optional, string | { _score, _doc, _geo_distance, _script } | string | { _score, _doc, _geo_distance, _script }[])**: One or more fields used to sort the search results. +- **`search_after` (Optional, number | number | string | boolean | null[])**: Retrieve the next page of hits using a set of sort values from the previous page. - * **`from` (Optional, number)**: The offset from the first result to fetch. It must be non-negative. - * **`size` (Optional, number)**: The number of hits to return. It must be non-negative. 
- * **`query` (Optional, { bool, boosting, common, combined_fields, constant_score, dis_max, distance_feature, exists, function_score, fuzzy, geo_bounding_box, geo_distance, geo_polygon, geo_shape, has_child, has_parent, ids, intervals, knn, match, match_all, match_bool_prefix, match_none, match_phrase, match_phrase_prefix, more_like_this, multi_match, nested, parent_id, percolate, pinned, prefix, query_string, range, rank_feature, regexp, rule, script, script_score, semantic, shape, simple_query_string, span_containing, span_field_masking, span_first, span_multi, span_near, span_not, span_or, span_term, span_within, sparse_vector, term, terms, terms_set, text_expansion, weighted_tokens, wildcard, wrapper, type })**: A query that filters the watches to be returned. - * **`sort` (Optional, string | { _score, _doc, _geo_distance, _script } | string | { _score, _doc, _geo_distance, _script }[])**: One or more fields used to sort the search results. - * **`search_after` (Optional, number | number | string | boolean | null | User-defined value[])**: Retrieve the next page of hits using a set of sort values from the previous page. +## client.watcher.start [_watcher.start] +Start the watch service. +Start the Watcher service if it is not already running. - - -### start [_start_3] - -Start the watch service. Start the Watcher service if it is not already running. - -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-start) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-watcher-start) ```ts client.watcher.start({ ... }) ``` +### Arguments [_arguments_watcher.start] -### Arguments [_arguments_482] - -* **Request (object):** - - * **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. - - +#### Request (object) [_request_watcher.start] +- **`master_timeout` (Optional, string | -1 | 0)**: Period to wait for a connection to the master node. -### stats [_stats_7] +## client.watcher.stats [_watcher.stats] +Get Watcher statistics. +This API always returns basic metrics. +You retrieve more metrics by using the metric parameter. -Get Watcher statistics. This API always returns basic metrics. You retrieve more metrics by using the metric parameter. - -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-stats) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-watcher-stats) ```ts client.watcher.stats({ ... }) ``` +### Arguments [_arguments_watcher.stats] -### Arguments [_arguments_483] - -* **Request (object):** - - * **`metric` (Optional, Enum("_all" | "queued_watches" | "current_watches" | "pending_watches") | Enum("_all" | "queued_watches" | "current_watches" | "pending_watches")[])**: Defines which additional metrics are included in the response. - * **`emit_stacktraces` (Optional, boolean)**: Defines whether stack traces are generated for each watch that is running. - +#### Request (object) [_request_watcher.stats] +- **`metric` (Optional, Enum("_all" | "queued_watches" | "current_watches" | "pending_watches") | Enum("_all" | "queued_watches" | "current_watches" | "pending_watches")[])**: Defines which additional metrics are included in the response. +- **`emit_stacktraces` (Optional, boolean)**: Defines whether stack traces are generated for each watch that is running. +## client.watcher.stop [_watcher.stop] +Stop the watch service. 
+Stop the Watcher service if it is running. -### stop [_stop_3] - -Stop the watch service. Stop the Watcher service if it is running. - -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-stop) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-watcher-stop) ```ts client.watcher.stop({ ... }) ``` +### Arguments [_arguments_watcher.stop] -### Arguments [_arguments_484] - -* **Request (object):** - - * **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for the master node. If the master node is not available before the timeout expires, the request fails and returns an error. To indicate that the request should never timeout, set it to `-1`. +#### Request (object) [_request_watcher.stop] +- **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for the master node. +If the master node is not available before the timeout expires, the request fails and returns an error. +To indicate that the request should never timeout, set it to `-1`. +## client.watcher.updateSettings [_watcher.update_settings] +Update Watcher index settings. +Update settings for the Watcher internal index (`.watches`). +Only a subset of settings can be modified. +This includes `index.auto_expand_replicas`, `index.number_of_replicas`, `index.routing.allocation.exclude.*`, +`index.routing.allocation.include.*` and `index.routing.allocation.require.*`. +Modification of `index.routing.allocation.include._tier_preference` is an exception and is not allowed as the +Watcher shards must always be in the `data_content` tier. - -### update_settings [_update_settings_2] - -Update Watcher index settings. Update settings for the Watcher internal index (`.watches`). Only a subset of settings can be modified. This includes `index.auto_expand_replicas` and `index.number_of_replicas`. - -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-update-settings) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-watcher-update-settings) ```ts client.watcher.updateSettings({ ... }) ``` +### Arguments [_arguments_watcher.update_settings] -### Arguments [_arguments_485] - -* **Request (object):** - - * **`index.auto_expand_replicas` (Optional, string)** - * **`index.number_of_replicas` (Optional, number)** - * **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. - * **`timeout` (Optional, string | -1 | 0)**: The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. - - - -## xpack [_xpack] +#### Request (object) [_request_watcher.update_settings] +- **`index.auto_expand_replicas` (Optional, string)** +- **`index.number_of_replicas` (Optional, number)** +- **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. +- **`timeout` (Optional, string | -1 | 0)**: The period to wait for a response. +If no response is received before the timeout expires, the request fails and returns an error. - -### info [_info_5] - -Get information. The information provided by the API includes: +## client.xpack.info [_xpack.info] +Get information. 
+The information provided by the API includes: * Build information including the build number and timestamp. * License information about the currently installed license. * Feature information for the features that are currently enabled and available under the current license. -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-info) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-info) ```ts client.xpack.info({ ... }) ``` +### Arguments [_arguments_xpack.info] -### Arguments [_arguments_486] - -* **Request (object):** - - * **`categories` (Optional, Enum("build" | "features" | "license")[])**: A list of the information categories to include in the response. For example, `build,license,features`. - * **`accept_enterprise` (Optional, boolean)**: If this param is used it must be set to true - * **`human` (Optional, boolean)**: Defines whether additional human-readable information is included in the response. In particular, it adds descriptions and a tag line. +#### Request (object) [_request_xpack.info] +- **`categories` (Optional, Enum("build" | "features" | "license")[])**: A list of the information categories to include in the response. +For example, `build,license,features`. +- **`accept_enterprise` (Optional, boolean)**: If this param is used it must be set to true +- **`human` (Optional, boolean)**: Defines whether additional human-readable information is included in the response. +In particular, it adds descriptions and a tag line. +## client.xpack.usage [_xpack.usage] +Get usage information. +Get information about the features that are currently enabled and available under the current license. +The API also provides some usage statistics. - -### usage [_usage_2] - -Get usage information. Get information about the features that are currently enabled and available under the current license. The API also provides some usage statistics. - -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-xpack) +[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/v9/group/endpoint-xpack) ```ts client.xpack.usage({ ... }) ``` +### Arguments [_arguments_xpack.usage] -### Arguments [_arguments_487] - -* **Request (object):** +#### Request (object) [_request_xpack.usage] +- **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for a connection to the master node. +If no response is received before the timeout expires, the request fails and returns an error. +To indicate that the request should never timeout, set it to `-1`. - * **`master_timeout` (Optional, string | -1 | 0)**: The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. To indicate that the request should never timeout, set it to `-1`. diff --git a/docs/reference/basic-config.md b/docs/reference/basic-config.md index dd3452217..7b523cbeb 100644 --- a/docs/reference/basic-config.md +++ b/docs/reference/basic-config.md @@ -5,7 +5,7 @@ mapped_pages: # Basic configuration [basic-config] -This page shows you the possible basic configuration options that the clients offers. +This page explains the basic configuration options for the JavaScript client. ```js const { Client } = require('@elastic/elasticsearch') @@ -18,34 +18,404 @@ const client = new Client({ }) ``` -| | | -| --- | --- | -| `node` or `nodes` | The Elasticsearch endpoint to use.
It can be a single string or an array of strings:

```js
node: 'http://localhost:9200/'
```

Or it can be an object (or an array of objects) that represents the node:

```js
node: {
url: new URL('http://localhost:9200/'),
tls: 'tls options',
agent: 'http agent options',
id: 'custom node id',
headers: { 'custom': 'headers' }
roles: {
master: true,
data: true,
ingest: true,
ml: false
}
}
```
| -| `auth` | Your authentication data. You can use both basic authentication and [ApiKey](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-create-api-key).
See [Authentication](/reference/connecting.md#authentication) for more details.
*Default:* `null`

Basic authentication:

```js
auth: {
username: 'elastic',
password: 'changeme'
}
```

[ApiKey](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-create-api-key) authentication:

```js
auth: {
apiKey: 'base64EncodedKey'
}
```

Bearer authentication, useful for [service account tokens](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-create-service-token). Be aware that it does not handle automatic token refresh:

```js
auth: {
bearer: 'token'
}
```
| -| `maxRetries` | `number` - Max number of retries for each request.
*Default:* `3` | -| `requestTimeout` | `number` - Max request timeout in milliseconds for each request.
*Default:* No value | -| `pingTimeout` | `number` - Max ping request timeout in milliseconds for each request.
*Default:* `3000` | -| `sniffInterval` | `number, boolean` - Perform a sniff operation every `n` milliseconds. Sniffing might not be the best solution for you, take a look [here](https://www.elastic.co/blog/elasticsearch-sniffing-best-practices-what-when-why-how) to know more.
*Default:* `false` | -| `sniffOnStart` | `boolean` - Perform a sniff once the client is started. Sniffing might not be the best solution for you, take a look [here](https://www.elastic.co/blog/elasticsearch-sniffing-best-practices-what-when-why-how) to know more.
*Default:* `false` | -| `sniffEndpoint` | `string` - Endpoint to ping during a sniff.
*Default:* `'_nodes/_all/http'` | -| `sniffOnConnectionFault` | `boolean` - Perform a sniff on connection fault. Sniffing might not be the best solution for you, take a look [here](https://www.elastic.co/blog/elasticsearch-sniffing-best-practices-what-when-why-how) to know more.
*Default:* `false` | -| `resurrectStrategy` | `string` - Configure the node resurrection strategy.
*Options:* `'ping'`, `'optimistic'`, `'none'`
*Default:* `'ping'` | -| `suggestCompression` | `boolean` - Adds `accept-encoding` header to every request.
*Default:* `false` | -| `compression` | `string, boolean` - Enables gzip request body compression.
*Options:* `'gzip'`, `false`
*Default:* `false` | -| `tls` | `http.SecureContextOptions` - tls [configuraton](https://nodejs.org/api/tls.md).
*Default:* `null` | -| `proxy` | `string, URL` - If you are using an http(s) proxy, you can put its url here. The client will automatically handle the connection to it.
*Default:* `null`

```js
const client = new Client({
node: 'http://localhost:9200/',
proxy: 'http://localhost:8080/'
})

const client = new Client({
node: 'http://localhost:9200/',
proxy: 'http://user:pwd@localhost:8080/'
})
```
| -| `agent` | `http.AgentOptions, function` - http agent [options](https://nodejs.org/api/http.md#http_new_agent_options), or a function that returns an actual http agent instance. If you want to disable the http agent use entirely (and disable the `keep-alive` feature), set the agent to `false`.
*Default:* `null`

```js
const client = new Client({
node: 'http://localhost:9200/',
agent: { agent: 'options' }
})

const client = new Client({
node: 'http://localhost:9200/',
// the function takes as parameter the option
// object passed to the Connection constructor
agent: (opts) => new CustomAgent()
})

const client = new Client({
node: 'http://localhost:9200/',
// Disable agent and keep-alive
agent: false
})
```
| -| `nodeFilter` | `function` - Filters which node not to use for a request.
*Default:*

```js
function defaultNodeFilter (node) {
// avoid master only nodes
if (node.roles.master === true &&
node.roles.data === false &&
node.roles.ingest === false) {
return false
}
return true
}
```
| -| `nodeSelector` | `function` - custom selection strategy.
*Options:* `'round-robin'`, `'random'`, custom function
*Default:* `'round-robin'`
*Custom function example:*

```js
function nodeSelector (connections) {
const index = calculateIndex()
return connections[index]
}
```
| -| `generateRequestId` | `function` - function to generate the request id for every request, it takes two parameters, the request parameters and options.
By default it generates an incremental integer for every request.
*Custom function example:*

```js
function generateRequestId (params, options) {
// your id generation logic
// must be syncronous
return 'id'
}
```
| -| `name` | `string, symbol` - The name to identify the client instance in the events.
*Default:* `elasticsearch-js` | -| `opaqueIdPrefix` | `string` - A string that will be use to prefix any `X-Opaque-Id` header.
See [`X-Opaque-Id` support](/reference/observability.md#_x_opaque_id_support) for more details.
_Default:* `null` | -| `headers` | `object` - A set of custom headers to send in every request.
*Default:* `{}` | -| `context` | `object` - A custom object that you can use for observability in your events.It will be merged with the API level context option.
*Default:* `null` | -| `enableMetaHeader` | `boolean` - If true, adds an header named `'x-elastic-client-meta'`, containing some minimal telemetry data,such as the client and platform version.
*Default:* `true` | -| `cloud` | `object` - Custom configuration for connecting to [Elastic Cloud](https://cloud.elastic.co). See [Authentication](/reference/connecting.md) for more details.
*Default:* `null`
*Cloud configuration example:*

```js
const client = new Client({
cloud: {
id: ''
},
auth: {
username: 'elastic',
password: 'changeme'
}
})
```
| -| `disablePrototypePoisoningProtection` | `boolean`, `'proto'`, `'constructor'` - The client can protect you against prototype poisoning attacks. Read [this article](https://web.archive.org/web/20200319091159/https://hueniverse.com/square-brackets-are-the-enemy-ff5b9fd8a3e8?gi=184a27ee2a08) to learn more about this security concern. If needed, you can enable prototype poisoning protection entirely (`false`) or one of the two checks (`'proto'` or `'constructor'`). For performance reasons, it is disabled by default. Read the `secure-json-parse` [documentation](https://github.com/fastify/secure-json-parse) to learn more.
*Default:* `true` | -| `caFingerprint` | `string` - If configured, verify that the fingerprint of the CA certificate that has signed the certificate of the server matches the supplied fingerprint. Only accepts SHA256 digest fingerprints.
*Default:* `null` | -| `maxResponseSize` | `number` - When configured, it verifies that the uncompressed response size is lower than the configured number, if it’s higher it will abort the request. It cannot be higher than buffer.constants.MAX_STRING_LENGTH
*Default:* `null` | -| `maxCompressedResponseSize` | `number` - When configured, it verifies that the compressed response size is lower than the configured number, if it’s higher it will abort the request. It cannot be higher than buffer.constants.MAX_LENGTH
*Default:* `null` | +### `node` or `nodes` +The {{es}} endpoint to use. It can be a single string or an array of strings: + +```js +node: 'http://localhost:9200/' +``` + +```js +nodes: ['http://localhost:9200/', 'http://localhost:9201/'] +``` + +Or it can be an object (or an array of objects) that represents the node: + +```js +node: { + url: new URL('http://localhost:9200/'), + tls: 'tls options', + agent: 'http agent options', + id: 'custom node id', + headers: { 'custom': 'headers' }, + roles: { + master: true, + data: true, + ingest: true, + ml: false + } +} +``` + +--- + +### `auth` + +Default: `null` + +Your authentication data. You can use both basic authentication and [ApiKey](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-create-api-key). + See [Authentication](/reference/connecting.md#authentication) for more details. + +Basic authentication: + +```js +auth: { + username: 'elastic', + password: 'changeme' +} +``` + +[ApiKey](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-create-api-key) authentication: + +```js +auth: { + apiKey: 'base64EncodedKey' +} +``` + +Bearer authentication, useful for [service account tokens](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-create-service-token). Be aware that it does not handle automatic token refresh: + +```js +auth: { + bearer: 'token' +} +``` + +--- + +### `maxRetries` + +Type: `number`
+Default: `3` + +Max number of retries for each request. + +--- + +### `requestTimeout` + +Type: `number`
+Default: `No value` + +Max request timeout in milliseconds for each request. + +--- + +### `pingTimeout` + +Type: `number`
+Default: `3000` + +Max ping request timeout in milliseconds for each request. + +--- + +### `sniffInterval` + +Type: `number, boolean`
+Default: `false` + +Perform a sniff operation every `n` milliseconds. + +:::{tip} +Sniffing might not be the best solution. Before using the various `sniff` options, review this [blog post](https://www.elastic.co/blog/elasticsearch-sniffing-best-practices-what-when-why-how). +::: + +--- + +### `sniffOnStart` + +Type: `boolean`
+Default: `false` + +Perform a sniff once the client is started. Be sure to review the sniffing best practices [blog post](https://www.elastic.co/blog/elasticsearch-sniffing-best-practices-what-when-why-how). + +--- + +### `sniffEndpoint` + +Type: `string`
+Default: `'_nodes/_all/http'` + +Endpoint to ping during a sniff. Be sure to review the sniffing best practices [blog post](https://www.elastic.co/blog/elasticsearch-sniffing-best-practices-what-when-why-how). + +--- + +### `sniffOnConnectionFault` + +Type: `boolean`
+Default: `false` + +Perform a sniff on connection fault. Be sure to review the sniffing best practices [blog post](https://www.elastic.co/blog/elasticsearch-sniffing-best-practices-what-when-why-how). + +--- + +### `resurrectStrategy` + +Type: `string`
+Default: `'ping'` + +Configure the node resurrection strategy.
+Options: `'ping'`, `'optimistic'`, `'none'` + +--- + +### `suggestCompression` + +Type: `boolean`
+Default: `false` + +Adds an `accept-encoding` header to every request. + +--- + +### `compression` + +Type: `string, boolean`
+Default: `false` + +Enables gzip request body compression.
+Options: `'gzip'`, `false` + +--- + +### `tls` + +Type: `http.SecureContextOptions`
+Default: `null` + +The [tls configuration](https://nodejs.org/api/tls.html). + +--- + +### `proxy` + +Type: `string, URL`
+Default: `null` + +If you are using an http(s) proxy, you can put its url here. The client will automatically handle the connection to it. + +```js +const client = new Client({ + node: 'http://localhost:9200/', + proxy: 'http://localhost:8080/' +}) + +const client = new Client({ + node: 'http://localhost:9200/', + proxy: 'http://user:pwd@localhost:8080/' +}) +``` + +--- + +### `agent` + +Type: `http.AgentOptions, function`
+Default: `null` + +http agent [options](https://nodejs.org/api/http.html#http_new_agent_options), or a function that returns an actual http agent instance. If you want to disable the http agent entirely (and disable the `keep-alive` feature), set the agent to `false`. + +```js +const client = new Client({ + node: 'http://localhost:9200/', + agent: { agent: 'options' } +}) + +const client = new Client({ + node: 'http://localhost:9200/', + // the function takes as parameter the option + // object passed to the Connection constructor + agent: (opts) => new CustomAgent() +}) + +const client = new Client({ + node: 'http://localhost:9200/', + // Disable agent and keep-alive + agent: false +}) +``` + +--- + +### `nodeFilter` + +Type: `function` + +Filter that indicates whether a node should be used for a request. Default function definition: + +```js +function defaultNodeFilter (node) { + // avoid master only nodes + if (node.roles.master === true && + node.roles.data === false && + node.roles.ingest === false) { + return false + } + return true +} +``` + +--- + +### `nodeSelector` + +Type: `function`
+Default: `'round-robin'` + +Custom selection strategy.
+Options: `'round-robin'`, `'random'`, custom function + +Custom function example: + +```js +function nodeSelector (connections) { + const index = calculateIndex() + return connections[index] +} +``` + +--- + +### `generateRequestId` + +Type: `function`
+ +Function to generate the request id for every request. It takes two parameters: the request parameters and options. By default, it generates an incremental integer for every request. + +Custom function example: + +```js +function generateRequestId (params, options) { + // your id generation logic + // must be synchronous + return 'id' +} +``` + +--- + +### `name` + +Type: `string, symbol`
+Default: `elasticsearch-js` + +The name to identify the client instance in the events. + +--- + +### `opaqueIdPrefix` + +Type: `string`
+Default: `null` + +A string that will be used to prefix any `X-Opaque-Id` header. +See [`X-Opaque-Id` support](/reference/observability.md#_x_opaque_id_support) for more details. + +--- + +### `headers` + +Type: `object`
+Default: `{}` + +A set of custom headers to send in every request. + +--- + +### `context` + +Type: `object`
+Default: `null` + +A custom object that you can use for observability in your events. It will be merged with the API level context option. + +--- + +### `enableMetaHeader` + +Type: `boolean`
+Default: `true` + +If true, adds a header named `'x-elastic-client-meta'`, containing some minimal telemetry data, such as the client and platform version. + +--- + +### `cloud` + +Type: `object`
+Default: `null` + +Custom configuration for connecting to [Elastic Cloud](https://cloud.elastic.co). See [Authentication](/reference/connecting.md) for more details. + +Cloud configuration example: + +```js +const client = new Client({ + cloud: { + id: '' + }, + auth: { + username: 'elastic', + password: 'changeme' + } +}) +``` + +--- + +### `disablePrototypePoisoningProtection` + +Default: `true` + +`boolean`, `'proto'`, `'constructor'` - The client can protect you against prototype poisoning attacks. For more information, refer to [Square Brackets are the Enemy](https://web.archive.org/web/20200319091159/https://hueniverse.com/square-brackets-are-the-enemy-ff5b9fd8a3e8?gi=184a27ee2a08). If needed, you can enable prototype poisoning protection entirely (`false`) or one of the two checks (`'proto'` or `'constructor'`). For performance reasons, it is disabled by default. To learn more, refer to the [`secure-json-parse` documentation](https://github.com/fastify/secure-json-parse). + +--- + +### `caFingerprint` + +Type: `string`
+Default: `null` + +If configured, verify that the fingerprint of the CA certificate that has signed the certificate of the server matches the supplied fingerprint. Only accepts SHA256 digest fingerprints. + +--- + +### `maxResponseSize` + +Type: `number`
+Default: `null` + +When configured, `maxResponseSize` verifies that the uncompressed response size is lower than the configured number. If it’s higher, the request will be canceled. The `maxResponseSize` cannot be higher than the value of `buffer.constants.MAX_STRING_LENGTH`. + +--- + +### `maxCompressedResponseSize` + +Type: `number`
+Default: `null` + +When configured, `maxCompressedResponseSize` verifies that the compressed response size is lower than the configured number. If it’s higher, the request will be canceled. The `maxCompressedResponseSize` cannot be higher than the value of `buffer.constants.MAX_LENGTH`. + +--- + +### `redaction` + +Type: `object`
+Default: A configuration that will replace known sources of sensitive data in `Error` metadata + +Options for how to redact potentially sensitive data from metadata attached to `Error` objects. + +::::{note} +[Read about redaction](/reference/advanced-config.md#redaction) for more details. +:::: + +--- + +### `serverMode` + +Type: `string`
+Default: `"stack"` + +Setting to `"stack"` sets defaults assuming a traditional (non-serverless) {{es}} instance. Setting to `"serverless"` sets defaults to work more seamlessly with [Elastic Cloud Serverless](https://www.elastic.co/guide/en/serverless/current/intro.html), like enabling compression and disabling features that assume the possibility of multiple {{es}} nodes. diff --git a/docs/reference/client-helpers.md b/docs/reference/client-helpers.md index 38c29198e..2aad979e6 100644 --- a/docs/reference/client-helpers.md +++ b/docs/reference/client-helpers.md @@ -11,15 +11,12 @@ The client comes with an handy collection of helpers to give you a more comforta The client helpers are experimental, and the API may change in the next minor releases. The helpers will not work in any Node.js version lower than 10. :::: - - ## Bulk helper [bulk-helper] Added in `v7.7.0` Running bulk requests can be complex due to the shape of the API, this helper aims to provide a nicer developer experience around the Bulk API. - ### Usage [_usage_3] ```js @@ -67,10 +64,8 @@ To create a new instance of the Bulk helper, access it as shown in the example a | `wait` | How much time to wait before retries in milliseconds.
*Default:* 5000.

```js
const b = client.helpers.bulk({
wait: 3000
})
```
| | `refreshOnCompletion` | If `true`, at the end of the bulk operation it runs a refresh on all indices or on the specified indices.
*Default:* false.

```js
const b = client.helpers.bulk({
refreshOnCompletion: true
// or
refreshOnCompletion: 'index-name'
})
```
| - ### Supported operations [_supported_operations] - #### Index [_index_2] ```js @@ -84,7 +79,6 @@ client.helpers.bulk({ }) ``` - #### Create [_create_4] ```js @@ -98,7 +92,6 @@ client.helpers.bulk({ }) ``` - #### Update [_update_3] ```js @@ -116,7 +109,6 @@ client.helpers.bulk({ }) ``` - #### Delete [_delete_10] ```js @@ -130,7 +122,6 @@ client.helpers.bulk({ }) ``` - ### Abort a bulk operation [_abort_a_bulk_operation] If needed, you can abort a bulk operation at any time. The bulk helper returns a [thenable](https://promisesaplus.com/), which has an `abort` method. @@ -139,7 +130,6 @@ If needed, you can abort a bulk operation at any time. The bulk helper returns a The abort method stops the execution of the bulk operation, but if you are using a concurrency higher than one, the operations that are already running will not be stopped. :::: - ```js const { createReadStream } = require('fs') const split = require('split2') @@ -164,7 +154,6 @@ const b = client.helpers.bulk({ console.log(await b) ``` - ### Passing custom options to the Bulk API [_passing_custom_options_to_the_bulk_api] You can pass any option supported by the link: [Bulk API](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-bulk) to the helper, and the helper uses those options in conjunction with the Bulk API call. @@ -181,7 +170,6 @@ const result = await client.helpers.bulk({ }) ``` - ### Usage with an async generator [_usage_with_an_async_generator] ```js @@ -214,7 +202,6 @@ const result = await client.helpers.bulk({ console.log(result) ``` - ### Modifying a document before operation [_modifying_a_document_before_operation] Added in `v8.8.2` @@ -241,14 +228,12 @@ const result = await client.helpers.bulk({ console.log(result) ``` - ## Multi search helper [multi-search-helper] Added in `v7.8.0` If you send search request at a high rate, this helper might be useful for you. It uses the multi search API under the hood to batch the requests and improve the overall performances of your application. The `result` exposes a `documents` property as well, which allows you to access directly the hits sources. - ### Usage [_usage_4] ```js @@ -278,7 +263,6 @@ To create a new instance of the multi search (msearch) helper, you should access | `retries` | How many times an operation is retried before to resolve the request. An operation is retried only in case of a 429 error.
*Default:* Client max retries.

```js
const m = client.helpers.msearch({
retries: 3
})
```
| | `wait` | How much time to wait before retries in milliseconds.
*Default:* 5000.

```js
const m = client.helpers.msearch({
wait: 3000
})
```
| - ### Stopping the msearch helper [_stopping_the_msearch_helper] If needed, you can stop an msearch processor at any time. The msearch helper returns a [thenable](https://promisesaplus.com/), which has an `stop` method. @@ -291,7 +275,6 @@ The `stop` method accepts an optional error, that will be dispatched every subse The stop method stops the execution of the msearch processor, but if you are using a concurrency higher than one, the operations that are already running will not be stopped. :::: - ```js const { Client } = require('@elastic/elasticsearch') @@ -318,7 +301,6 @@ m.search( setImmediate(() => m.stop()) ``` - ## Search helper [search-helper] Added in `v7.7.0` @@ -340,7 +322,6 @@ for (const doc of documents) { } ``` - ## Scroll search helper [scroll-search-helper] Added in `v7.7.0` @@ -362,7 +343,6 @@ for await (const result of scrollSearch) { } ``` - ### Clear a scroll search [_clear_a_scroll_search] If needed, you can clear a scroll search by calling `result.clear()`: @@ -375,7 +355,6 @@ for await (const result of scrollSearch) { } ``` - ### Quickly getting the documents [_quickly_getting_the_documents] If you only need the documents from the result of a scroll search, you can access them via `result.documents`: @@ -386,7 +365,6 @@ for await (const result of scrollSearch) { } ``` - ## Scroll documents helper [scroll-documents-helper] Added in `v7.7.0` @@ -408,15 +386,12 @@ for await (const doc of scrollSearch) { } ``` - ## ES|QL helper [esql-helper] ES|QL queries can return their results in [several formats](docs-content://explore-analyze/query-filter/languages/esql-rest.md#esql-rest-format). The default JSON format returned by ES|QL queries contains arrays of values for each row, with column names and types returned separately: - ### Usage [_usage_5] - #### `toRecords` [_torecords] Added in `v8.14.0` @@ -494,14 +469,13 @@ const result = await client.helpers .toRecords() ``` - #### `toArrowReader` [_toarrowreader] Added in `v8.16.0` ES|QL can return results in multiple binary formats, including [Apache Arrow](https://arrow.apache.org/)'s streaming format. Because it is a very efficient format to read, it can be valuable for performing high-performance in-memory analytics. And, because the response is streamed as batches of records, it can be used to produce aggregations and other calculations on larger-than-memory data sets. -`toArrowReader` returns a [`RecordBatchStreamReader`](https://arrow.apache.org/docs/js/classes/Arrow_dom.RecordBatchReader.md). +`toArrowReader` returns an [`AsyncRecordBatchStreamReader`](https://github.com/apache/arrow/blob/520ae44272d491bbb52eb3c9b84864ed7088f11a/js/src/ipc/reader.ts#L216). ```ts const reader = await client.helpers @@ -509,14 +483,13 @@ const reader = await client.helpers .toArrowReader() // print each record as JSON -for (const recordBatch of reader) { +for await (const recordBatch of reader) { for (const record of recordBatch) { console.log(record.toJSON()) } } ``` - #### `toArrowTable` [_toarrowtable] Added in `v8.16.0` diff --git a/docs/reference/configuration.md b/docs/reference/configuration.md index 0367bdc12..744743ef6 100644 --- a/docs/reference/configuration.md +++ b/docs/reference/configuration.md @@ -7,13 +7,8 @@ mapped_pages: The client is designed to be easily configured for your needs. In the following section, you can see the possible options that you can use to configure it. 
-* [Basic configuration](/reference/basic-config.md) -* [Advanced configuration](/reference/advanced-config.md) -* [Timeout best practices](docs-content://troubleshoot/elasticsearch/elasticsearch-client-javascript-api/nodejs.md) -* [Creating a child client](/reference/child.md) -* [Testing](/reference/client-testing.md) - - - - - +- [Basic configuration](/reference/basic-config.md) +- [Advanced configuration](/reference/advanced-config.md) +- [Timeout best practices](/reference/timeout-best-practices.md) +- [Creating a child client](/reference/child.md) +- [Testing](/reference/client-testing.md) \ No newline at end of file diff --git a/docs/reference/connecting.md b/docs/reference/connecting.md index 34f85e65c..76910d509 100644 --- a/docs/reference/connecting.md +++ b/docs/reference/connecting.md @@ -11,7 +11,6 @@ This page contains the information you need to connect and use the Client with { This document contains code snippets to show you how to connect to various {{es}} providers. - ### Elastic Cloud [auth-ec] If you are using [Elastic Cloud](https://www.elastic.co/cloud), the client offers an easy way to connect to it via the `cloud` option. You must pass the Cloud ID that you can find in the cloud console, then your username and password inside the `auth` option. @@ -20,12 +19,10 @@ If you are using [Elastic Cloud](https://www.elastic.co/cloud), the client offer When connecting to Elastic Cloud, the client will automatically enable both request and response compression by default, since it yields significant throughput improvements. Moreover, the client will also set the tls option `secureProtocol` to `TLSv1_2_method` unless specified otherwise. You can still override this option by configuring them. :::: - ::::{important} Do not enable sniffing when using Elastic Cloud, since the nodes are behind a load balancer, Elastic Cloud will take care of everything for you. Take a look [here](https://www.elastic.co/blog/elasticsearch-sniffing-best-practices-what-when-why-how) to know more. :::: - ```js const { Client } = require('@elastic/elasticsearch') const client = new Client({ @@ -39,6 +36,24 @@ const client = new Client({ }) ``` +## Connecting to an Elastic Cloud Serverless instance [connect-serverless] + +The Node.js client is built to support connecting to [Elastic Cloud Serverless](https://www.elastic.co/guide/en/serverless/current/intro.html). By setting the `serverMode` option to `"serverless"`, several default options will be modified to better suit the serverless environment. + +```js +const { Client } = require('@elastic/elasticsearch') +const client = new Client({ + cloud: { + id: '' + }, + auth: { + username: 'elastic', + password: 'changeme' + }, + serverMode: 'serverless' +}) + +``` ## Connecting to a self-managed cluster [connect-self-managed-new] @@ -62,7 +77,6 @@ When you start {{es}} for the first time you’ll see a distinct block like the Depending on the circumstances there are two options for verifying the HTTPS connection, either verifying with the CA certificate itself or via the HTTP CA certificate fingerprint. - ### TLS configuration [auth-tls] The generated root CA certificate can be found in the `certs` directory in your {{es}} config location (`$ES_CONF_PATH/certs/http_ca.crt`). If you’re running {{es}} in Docker there is [additional documentation for retrieving the CA certificate](docs-content://deploy-manage/deploy/self-managed/install-elasticsearch-with-docker.md). 
@@ -84,7 +98,6 @@ const client = new Client({ }) ``` - ### CA fingerprint [auth-ca-fingerprint] You can configure the client to only trust certificates that are signed by a specific CA certificate (CA certificate pinning) by providing a `caFingerprint` option. This will verify that the fingerprint of the CA certificate that has signed the certificate of the server matches the supplied value. You must configure a SHA256 digest. @@ -125,14 +138,12 @@ The output of `openssl x509` will look something like this: SHA256 Fingerprint=A5:2D:D9:35:11:E8:C6:04:5E:21:F1:66:54:B7:7C:9E:E0:F3:4A:EA:26:D9:F4:03:20:B5:31:C4:74:67:62:28 ``` - ## Connecting without security enabled [connect-no-security] ::::{warning} Running {{es}} without security enabled is not recommended. :::: - If your cluster is configured with [security explicitly disabled](elasticsearch://reference/elasticsearch/configuration-reference/security-settings.md) then you can connect via HTTP: ```js @@ -142,12 +153,10 @@ const client = new Client({ }) ``` - ## Authentication strategies [auth-strategies] Following you can find all the supported authentication strategies. - ### ApiKey authentication [auth-apikey] You can use the [ApiKey](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-create-api-key) authentication by passing the `apiKey` parameter via the `auth` option. The `apiKey` parameter can be either a base64 encoded string or an object with the values that you can obtain from the [create api key endpoint](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-create-api-key). @@ -156,7 +165,6 @@ You can use the [ApiKey](https://www.elastic.co/docs/api/doc/elasticsearch/opera If you provide both basic authentication credentials and the ApiKey configuration, the ApiKey takes precedence. :::: - ```js const { Client } = require('@elastic/elasticsearch') const client = new Client({ @@ -180,7 +188,6 @@ const client = new Client({ }) ``` - ### Bearer authentication [auth-bearer] You can provide your credentials by passing the `bearer` token parameter via the `auth` option. Useful for [service account tokens](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-create-service-token). Be aware that it does not handle automatic token refresh. @@ -195,7 +202,6 @@ const client = new Client({ }) ``` - ### Basic authentication [auth-basic] You can provide your credentials by passing the `username` and `password` parameters via the `auth` option. @@ -204,7 +210,6 @@ You can provide your credentials by passing the `username` and `password` parame If you provide both basic authentication credentials and the Api Key configuration, the Api Key will take precedence. :::: - ```js const { Client } = require('@elastic/elasticsearch') const client = new Client({ @@ -225,7 +230,6 @@ const client = new Client({ }) ``` - ## Usage [client-usage] Using the client is straightforward, it supports all the public APIs of {{es}}, and every method exposes the same signature. @@ -278,8 +282,6 @@ In this case, the result will be: The body is a boolean value when you use `HEAD` APIs. :::: - - ### Aborting a request [_aborting_a_request] If needed, you can abort a running request by using the `AbortController` standard. @@ -288,7 +290,6 @@ If needed, you can abort a running request by using the `AbortController` standa If you abort a request, the request will fail with a `RequestAbortedError`. 
:::: - ```js const AbortController = require('node-abort-controller') const { Client } = require('@elastic/elasticsearch') @@ -308,7 +309,6 @@ const result = await client.search({ }, { signal: abortController.signal }) ``` - ### Request specific options [_request_specific_options] If needed you can pass request specific options in a second object: @@ -352,7 +352,6 @@ The supported request specific options are: This section illustrates the best practices for leveraging the {{es}} client in a Function-as-a-Service (FaaS) environment. The most influential optimization is to initialize the client outside of the function, the global scope. This practice does not only improve performance but also enables background functionality as – for example – [sniffing](https://www.elastic.co/blog/elasticsearch-sniffing-best-practices-what-when-why-how). The following examples provide a skeleton for the best practices. - ### GCP Cloud Functions [_gcp_cloud_functions] ```js @@ -369,7 +368,6 @@ exports.testFunction = async function (req, res) { } ``` - ### AWS Lambda [_aws_lambda] ```js @@ -386,7 +384,6 @@ exports.handler = async function (event, context) { } ``` - ### Azure Functions [_azure_functions] ```js @@ -410,7 +407,6 @@ Resources used to assess these recommendations: * [Azure Functions Python developer guide](https://docs.microsoft.com/en-us/azure/azure-functions/functions-reference-python?tabs=azurecli-linux%2Capplication-level#global-variables) * [AWS Lambda: Comparing the effect of global scope](https://docs.aws.amazon.com/lambda/latest/operatorguide/global-scope.md) - ## Connecting through a proxy [client-connect-proxy] Added in `v7.10.0` @@ -421,7 +417,6 @@ If you need to pass through an http(s) proxy for connecting to {{es}}, the clien In versions 8.0+ of the client, the default `Connection` type is set to `UndiciConnection`, which does not support proxy configurations. To use a proxy, you will need to use the `HttpConnection` class from `@elastic/transport` instead. :::: - ```js import { HttpConnection } from '@elastic/transport' @@ -455,7 +450,6 @@ const client = new Client({ }) ``` - ## Error handling [client-error-handling] The client exposes a variety of error objects that you can use to enhance your error handling. You can find all the error objects inside the `errors` key in the client. @@ -506,7 +500,6 @@ const client = new Client({ }) ``` - ## Closing a client’s connections [close-connections] If you would like to close all open connections being managed by an instance of the client, use the `close()` function: @@ -518,7 +511,6 @@ const client = new Client({ client.close(); ``` - ## Automatic product check [product-check] Since v7.14.0, the client performs a required product check before the first call. This pre-flight product check allows the client to establish the version of Elasticsearch that it is communicating with. The product check requires one additional HTTP request to be sent to the server as part of the request pipeline before the main API call is sent. In most cases, this will succeed during the very first API call that the client sends. Once the product check completes, no further product check HTTP requests are sent for subsequent API calls. 
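The error-handling hunk above notes that every error class is exposed under the client's `errors` key, but no usage pattern appears in the surrounding context lines. The following is a minimal illustrative sketch (not part of the diff itself), assuming the client re-exports `ResponseError` and `ConnectionError` under `errors` and that response metadata is available on `err.meta`:

```js
const { Client, errors } = require('@elastic/elasticsearch')

const client = new Client({ node: 'http://localhost:9200/' })

async function searchSafely () {
  try {
    return await client.search({ index: 'my-index', query: { match_all: {} } })
  } catch (err) {
    if (err instanceof errors.ResponseError) {
      // Elasticsearch replied with a non-2xx status code
      console.error('response error', err.meta.statusCode, err.meta.body)
    } else if (err instanceof errors.ConnectionError) {
      // the request never reached Elasticsearch
      console.error('connection error', err.message)
    } else {
      throw err
    }
    return null
  }
}
```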
diff --git a/docs/reference/getting-started.md b/docs/reference/getting-started.md index 59b290037..61f2dabfb 100644 --- a/docs/reference/getting-started.md +++ b/docs/reference/getting-started.md @@ -45,15 +45,11 @@ const client = new Client({ Your Elasticsearch endpoint can be found on the **My deployment** page of your deployment: -:::{image} ../images/es-endpoint.jpg -:alt: Finding Elasticsearch endpoint -::: +![Finding Elasticsearch endpoint](images/es-endpoint.jpg) You can generate an API key on the **Management** page under Security. -:::{image} ../images/create-api-key.png -:alt: Create API key -::: +![Create API key](images/create-api-key.png) For other connection options, refer to the [*Connecting*](/reference/connecting.md) section. diff --git a/docs/images/create-api-key.png b/docs/reference/images/create-api-key.png similarity index 100% rename from docs/images/create-api-key.png rename to docs/reference/images/create-api-key.png diff --git a/docs/images/es-endpoint.jpg b/docs/reference/images/es-endpoint.jpg similarity index 100% rename from docs/images/es-endpoint.jpg rename to docs/reference/images/es-endpoint.jpg diff --git a/docs/reference/timeout-best-practices.md b/docs/reference/timeout-best-practices.md new file mode 100644 index 000000000..8bb66f961 --- /dev/null +++ b/docs/reference/timeout-best-practices.md @@ -0,0 +1,13 @@ +--- +mapped_pages: + - https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/timeout-best-practices.html +--- + +# Timeout best practices [timeout-best-practices] + +Starting in 9.0.0, this client is configured to not time out any HTTP request by default. {{es}} will always eventually respond to any request, even if it takes several minutes. Reissuing a request that it has not responded to yet can cause performance side effects. See the [official {{es}} recommendations for HTTP clients](elasticsearch://reference/elasticsearch/configuration-reference/networking-settings.md#_http_client_configuration) for more information. + +Prior to 9.0, this client was configured by default to operate like many HTTP client libraries do, by using a relatively short (30 second) timeout on all requests sent to {{es}}, raising a `TimeoutError` when that time period elapsed without receiving a response. + +If you need to set timeouts on Elasticsearch requests, setting the `requestTimeout` value to a millisecond value will cause this client to operate as it did prior to 9.0. + diff --git a/docs/reference/toc.yml b/docs/reference/toc.yml index 3896c1fde..9fbda6f58 100644 --- a/docs/reference/toc.yml +++ b/docs/reference/toc.yml @@ -31,4 +31,5 @@ toc: - file: update_examples.md - file: update_by_query_examples.md - file: reindex_examples.md - - file: client-helpers.md \ No newline at end of file + - file: client-helpers.md + - file: timeout-best-practices.md \ No newline at end of file diff --git a/docs/release-notes/breaking-changes.md b/docs/release-notes/breaking-changes.md index 9758cf808..326fafdaf 100644 --- a/docs/release-notes/breaking-changes.md +++ b/docs/release-notes/breaking-changes.md @@ -1,28 +1,55 @@ --- -navigation_title: "Elasticsearch JavaScript Client" +navigation_title: "Breaking changes" --- # Elasticsearch JavaScript Client breaking changes [elasticsearch-javascript-client-breaking-changes] -Before you upgrade, carefully review the Elasticsearch JavaScript Client breaking changes and take the necessary steps to mitigate any issues. -To learn how to upgrade, check out . 
+Breaking changes can impact your Elastic applications, potentially disrupting normal operations. Before you upgrade, carefully review the Elasticsearch JavaScript Client breaking changes and take the necessary steps to mitigate any issues. To learn how to upgrade, check [Upgrade](docs-content://deploy-manage/upgrade.md). % ## Next version [elasticsearch-javascript-client-versionnext-breaking-changes] -% **Release date:** Month day, year -% ::::{dropdown} Title of breaking change +% ::::{dropdown} Title of breaking change % Description of the breaking change. % For more information, check [PR #](PR link). % **Impact**
Impact of the breaking change. % **Action**
Steps for mitigating deprecation impact. % :::: -% ## 9.0.0 [elasticsearch-javascript-client-900-breaking-changes] -% **Release date:** March 25, 2025 +## 9.0.0 [elasticsearch-javascript-client-900-breaking-changes] -% ::::{dropdown} Title of breaking change -% Description of the breaking change. -% For more information, check [PR #](PR link). -% **Impact**
Impact of the breaking change. -% **Action**
Steps for mitigating deprecation impact. -% :::: \ No newline at end of file +::::{dropdown} Changes to the optional body property + +In 8.x, every API function had a `body` property that would provide a place to put arbitrary values that should go in the HTTP request body, even if they were not noted in the specification or documentation. In 9.0, each API function still includes an optional `body` property, but TypeScript's type checker will disallow properties that should go in the root of the object. A `querystring` parameter has also been added that behaves the same as `body`, but inserts its values into the request querystring. + +**Impact**
Some adjustments to API calls may be necessary for code that used a `body` property in 8.x, especially to appease the TypeScript type checker, but it should not have any impact on any code that was not using a `body` property. + +**Action**
Check existing code for use of the `body` property, and move any properties that should be in the root object according to the API function's request type definition. If using TypeScript, the TypeScript type checker will surface most of these issues for you. +:::: + +::::{dropdown} Changes to API parameter collation into an HTTP request + +The logic for where each parameter in an API function call should be added to its HTTP request has been updated: + +1. If recognized as a `body` parameter according to the Elasticsearch specification, put it in the JSON body +2. If recognized as a `path` parameter, put it in the URL path +3. If recognized as a `query` parameter or a "common" query parameter (e.g. `pretty`, `error_trace`), put it in the querystring +4. If not recognized, and this API accepts a JSON body, put it in the JSON body +5. If not recognized and this API does not accept a JSON body, put it in the querystring + +The first two steps are identical in 8.x. The final three steps replace the logic from 8.x that put all unrecognized parameters in the querystring. + +**Impact**
Some parameters that were sent via querystring to Elasticsearch may be sent in the JSON request body, and vice versa. + +**Action**
If Elasticsearch sends back an error response due to a request not being valid, verify with the client's TypeScript type definitions, or via the docs, that the parameters your code passes are correct. +:::: + +::::{dropdown} Removal of the default 30-second timeout on all API calls + +The default 30-second timeout on all HTTP requests sent to Elasticsearch has been dropped in favor of having no timeout set at all. The previous behavior still works as it did when setting the `requestTimeout` value. + +See pull request [#2573](https://github.com/elastic/elasticsearch-js/pull/2573) for more information. + +**Impact**
Requests to Elasticsearch that used to time out after 30 seconds will now wait for as long as it takes for Elasticsearch to respond. + +**Action**
In environments where it is not ideal to wait for an API response indefinitely, manually setting the `requestTimeout` option when instantiating the client still works as it did in 8.x. +:::: diff --git a/docs/release-notes/deprecations.md b/docs/release-notes/deprecations.md index fef8650dd..a137fb0cf 100644 --- a/docs/release-notes/deprecations.md +++ b/docs/release-notes/deprecations.md @@ -1,24 +1,17 @@ --- -navigation_title: "Elasticsearch JavaScript Client" +navigation_title: "Deprecations" --- # Elasticsearch JavaScript Client deprecations [elasticsearch-javascript-client-deprecations] -Review the deprecated functionality for your Elasticsearch JavaScript Client version. While deprecations have no immediate impact, we strongly encourage you update your implementation after you upgrade. +Over time, certain Elastic functionality becomes outdated and is replaced or removed. To help with the transition, Elastic deprecates functionality for a period before removal, giving you time to update your applications. -To learn how to upgrade, check out . +Review the deprecated functionality for the Elasticsearch JavaScript Client. While deprecations have no immediate impact, we strongly encourage you update your implementation after you upgrade. To learn how to upgrade, check out [Upgrade](docs-content://deploy-manage/upgrade.md). -% ## Next version -% **Release date:** Month day, year +## 9.0.0 [elasticsearch-javascript-client-900-deprecations] -% ::::{dropdown} Deprecation title -% Description of the deprecation. -% For more information, check [PR #](PR link). -% **Impact**
Impact of deprecation. -% **Action**
Steps for mitigating deprecation impact. -% :::: +_No deprecations_ -% ## 9.0.0 [elasticsearch-javascript-client-900-deprecations] -% **Release date:** March 25, 2025 +% ## Next version % ::::{dropdown} Deprecation title % Description of the deprecation. diff --git a/docs/release-notes/index.md b/docs/release-notes/index.md index 071841de1..85b8d67c0 100644 --- a/docs/release-notes/index.md +++ b/docs/release-notes/index.md @@ -1,27 +1,43 @@ --- navigation_title: "Elasticsearch JavaScript Client" +mapped_pages: + - https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/changelog-client.html --- # Elasticsearch JavaScript Client release notes [elasticsearch-javascript-client-release-notes] -Review the changes, fixes, and more in each version of Elasticsearch JavaScript Client. +Review the changes, fixes, and more in each version of Elasticsearch JavaScript Client. To check for security updates, go to [Security announcements for the Elastic stack](https://discuss.elastic.co/c/announcements/security-announcements/31). -% Release notes include only features, enhancements, and fixes. Add breaking changes, deprecations, and known issues to the applicable release notes sections. +% Release notes include only features, enhancements, and fixes. Add breaking changes, deprecations, and known issues to the applicable release notes sections. % ## version.next [elasticsearch-javascript-client-next-release-notes] + % **Release date:** Month day, year % ### Features and enhancements [elasticsearch-javascript-client-next-features-enhancements] -% * % ### Fixes [elasticsearch-javascript-client-next-fixes] -% * ## 9.0.0 [elasticsearch-javascript-client-900-release-notes] -**Release date:** March 25, 2025 + +**Release date:** April 8, 2025 ### Features and enhancements [elasticsearch-javascript-client-900-features-enhancements] -### Fixes [elasticsearch-javascript-client-900-fixes] \ No newline at end of file +- **Compatibility with Elasticsearch 9.0:** All changes and additions to Elasticsearch APIs for its 9.0 release are reflected in this release. +- **Serverless client merged in:** the `@elastic/elasticsearch-serverless` client is being deprecated, and its functionality has been merged back into this client. This should have zero impact on the way the client works by default, except that a new `serverMode` option has been added. When it's explicitly set to `"serverless"` by a user, a few default settings and behaviors are changed: + + - turns off sniffing and ignores any sniffing-related options + - ignores all nodes passed in config except the first one, and ignores any node filtering and selecting options + - enables compression and `TLSv1_2_method` (same as when configured for Elastic Cloud) + - adds an `elastic-api-version` HTTP header to all requests + - uses `CloudConnectionPool` by default instead of `WeightedConnectionPool` + - turns off vendored `content-type` and `accept` headers in favor or standard MIME types + + Docstrings for types that differ between stack and serverless have also been updated to indicate when that is the case. + +- **Improved Cloud ID parsing:** when using a Cloud ID as the `cloud` parameter to instantiate the client, that ID was assumed to be in the correct format. New assertions have been added to verify that format and throw a `ConfigurationError` if it is invalid. See [#2694](https://github.com/elastic/elasticsearch-js/issues/2694). 
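To illustrate the "Improved Cloud ID parsing" note above, here is a hypothetical sketch of how the stricter validation could surface in application code. It assumes the client re-exports `ConfigurationError` under its `errors` key and uses a deliberately malformed Cloud ID; it is an illustration, not part of the release notes themselves:

```js
const { Client, errors } = require('@elastic/elasticsearch')

let client
try {
  client = new Client({
    cloud: { id: 'not-a-valid-cloud-id' }, // malformed on purpose
    auth: { apiKey: 'base64EncodedKey' }
  })
} catch (err) {
  if (err instanceof errors.ConfigurationError) {
    // thrown at construction time when the Cloud ID cannot be parsed
    console.error('Invalid Cloud ID:', err.message)
  } else {
    throw err
  }
}
```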
+ +% ### Fixes [elasticsearch-javascript-client-900-fixes] diff --git a/docs/release-notes/known-issues.md b/docs/release-notes/known-issues.md index 16ca9fb3c..e35bd7826 100644 --- a/docs/release-notes/known-issues.md +++ b/docs/release-notes/known-issues.md @@ -1,10 +1,14 @@ --- -navigation_title: "Elasticsearch JavaScript Client" +navigation_title: "Known issues" --- # Elasticsearch JavaScript Client known issues [elasticsearch-javascript-client-known-issues] +## 9.0.0 + +_No known issues_ + % Use the following template to add entries to this page. % :::{dropdown} Title of known issue @@ -17,4 +21,4 @@ navigation_title: "Elasticsearch JavaScript Client" % **Resolved** % On [Month/Day/Year], this issue was resolved. -::: \ No newline at end of file +% ::: \ No newline at end of file diff --git a/index.d.ts b/index.d.ts index 89be0131c..12d5eb23e 100644 --- a/index.d.ts +++ b/index.d.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 */ import Client from './lib/client' diff --git a/index.js b/index.js index 0bf3da3da..eb12ae5f4 100644 --- a/index.js +++ b/index.js @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. 
and contributors + * SPDX-License-Identifier: Apache-2.0 */ 'use strict' diff --git a/package.json b/package.json index 377ba7b6b..739e93203 100644 --- a/package.json +++ b/package.json @@ -1,7 +1,7 @@ { "name": "@elastic/elasticsearch", - "version": "9.0.0-alpha.3", - "versionCanary": "9.0.0-canary.0", + "version": "9.0.1", + "versionCanary": "9.0.1-canary.0", "description": "The official Elasticsearch client for Node.js", "main": "./index.js", "types": "index.d.ts", @@ -18,10 +18,12 @@ "test:coverage-100": "npm run build && tap --coverage --100", "test:coverage-report": "npm run build && tap --coverage && nyc report --reporter=text-lcov > coverage.lcov", "test:coverage-ui": "npm run build && tap --coverage --coverage-report=html", - "test:integration": "tsc && node test/integration/index.js", + "test:integration-build": "npm run build && node test/integration/index.js", + "test:integration": "npm run test:integration-build && env tap run --jobs=1 --reporter=junit --reporter-file=report-junit.xml generated-tests/", "lint": "ts-standard src", "lint:fix": "ts-standard --fix src", "license-checker": "license-checker --production --onlyAllow='MIT;Apache-2.0;Apache1.1;ISC;BSD-3-Clause;BSD-2-Clause;0BSD'", + "license-header": "./scripts/check-spdx", "prebuild": "npm run clean-build && npm run lint", "build": "tsc && rm lib/package.json && mv lib/src/* lib/ && rm -rf lib/src", "clean-build": "rimraf ./lib && mkdir lib", @@ -76,8 +78,8 @@ "node-fetch": "2.7.0", "ora": "5.4.1", "proxy": "1.0.2", - "rimraf": "3.0.2", - "semver": "7.6.3", + "rimraf": "5.0.10", + "semver": "7.7.1", "split2": "4.2.0", "stoppable": "1.1.0", "tap": "21.0.2", @@ -89,8 +91,8 @@ "zx": "7.2.3" }, "dependencies": { - "@elastic/transport": "9.0.0-alpha.1", - "apache-arrow": "^18.0.0", + "@elastic/transport": "^9.0.1", + "apache-arrow": "18.x - 19.x", "tslib": "^2.4.0" }, "tap": { diff --git a/scripts/check-spdx b/scripts/check-spdx new file mode 100755 index 000000000..c60d600e8 --- /dev/null +++ b/scripts/check-spdx @@ -0,0 +1,27 @@ +#!/usr/bin/env bash + +# Copyright Elasticsearch B.V. and contributors +# SPDX-License-Identifier: Apache-2.0 + +correct='/* + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 + */' + +the_exit=0 + +check_file() { + if $(diff <(head -n4 "$1") <(echo "$correct") &>/dev/null); then + echo "Correct: $1" + else + echo "Incorrect: $1" + the_exit=1 + fi +} + +echo "SPDX license header check" +for file in $(git ls-files | grep -E '\.(ts|js|mjs)$'); do + check_file "$file" +done + +exit "$the_exit" diff --git a/scripts/download-artifacts.js b/scripts/download-artifacts.js index 9618838be..c15ed4ae1 100644 --- a/scripts/download-artifacts.js +++ b/scripts/download-artifacts.js @@ -1,178 +1,104 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. 
See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 */ -'use strict' - const { join } = require('path') -const minimist = require('minimist') const stream = require('stream') const { promisify } = require('util') const { createWriteStream, promises } = require('fs') -const rimraf = require('rimraf') +const { rimraf } = require('rimraf') const fetch = require('node-fetch') const crossZip = require('cross-zip') const ora = require('ora') -const { mkdir, writeFile } = promises +const { mkdir, cp } = promises const pipeline = promisify(stream.pipeline) const unzip = promisify(crossZip.unzip) -const rm = promisify(rimraf) - -const esFolder = join(__dirname, '..', 'elasticsearch') -const zipFolder = join(esFolder, 'artifacts.zip') -const specFolder = join(esFolder, 'rest-api-spec', 'api') -const freeTestFolder = join(esFolder, 'rest-api-spec', 'test', 'free') -const xPackTestFolder = join(esFolder, 'rest-api-spec', 'test', 'platinum') -const artifactInfo = join(esFolder, 'info.json') - -async function downloadArtifacts (opts) { - if (typeof opts.version !== 'string') { - throw new Error('Missing version') - } - const log = ora('Checking out spec and test').start() +const testYamlFolder = join(__dirname, '..', 'yaml-rest-tests') +const zipFile = join(__dirname, '..', 'elasticsearch-clients-tests.zip') - log.text = 'Resolving versions' - let resolved - try { - resolved = await resolve(opts.version, opts.hash) - } catch (err) { - log.fail(err.message) - process.exit(1) - } +const schemaFolder = join(__dirname, '..', 'schema') +const schemaJson = join(schemaFolder, 'schema.json') - opts.id = opts.id || resolved.id - opts.hash = opts.hash || resolved.hash - opts.version = resolved.version +async function downloadArtifacts (localTests, version = 'main') { + const log = ora('Checking out spec and test').start() - const info = loadInfo() + const { GITHUB_TOKEN } = process.env - if (info && info.version === opts.version) { - if (info.hash === opts.hash && info.id === opts.id) { - log.succeed('The artifact copy present locally is already up to date') - return - } + if (version !== 'main') { + version = version.split('.').slice(0, 2).join('.') } - log.text = 'Cleanup checkouts/elasticsearch' - await rm(esFolder) - await mkdir(esFolder, { recursive: true }) + log.text = 'Clean tests folder' + await rimraf(testYamlFolder) + await mkdir(testYamlFolder, { recursive: true }) - log.text = 'Downloading artifacts' - const response = await fetch(resolved.url) - if (!response.ok) { - log.fail(`unexpected response ${response.statusText}`) - process.exit(1) - } - await pipeline(response.body, createWriteStream(zipFolder)) + log.text = `Fetch test YAML files for version ${version}` - log.text = 'Unzipping' - await unzip(zipFolder, esFolder) + if (localTests) { + log.text = `Copying local tests from ${localTests}` + await cp(localTests, testYamlFolder, { recursive: true }) + } else { + if (!GITHUB_TOKEN) { + log.fail("Missing required environment variable 'GITHUB_TOKEN'") + process.exit(1) + } - log.text = 'Cleanup' - await rm(zipFolder) + const response = await fetch(`https://api.github.com/repos/elastic/elasticsearch-clients-tests/zipball/${version}`, { + headers: { + Authorization: `Bearer ${GITHUB_TOKEN}`, + Accept: 'application/vnd.github+json' + } + }) - log.text = 'Update info' - await writeFile(artifactInfo, JSON.stringify(opts), 'utf8') + if (!response.ok) { + 
log.fail(`unexpected response ${response.statusText}`) + process.exit(1) + } - log.succeed('Done') -} + log.text = 'Downloading tests zipball' + await pipeline(response.body, createWriteStream(zipFile)) -function loadInfo () { - try { - return require(artifactInfo) - } catch (err) { - return null - } -} + log.text = 'Unzipping tests' + await unzip(zipFile, testYamlFolder) -async function resolve (version, hash) { - const response = await fetch(`https://artifacts-api.elastic.co/v1/versions/${version}`) - if (!response.ok) { - throw new Error(`unexpected response ${response.statusText}`) + log.text = 'Cleanup' + await rimraf(zipFile) } - const data = await response.json() - const esBuilds = data.version.builds - .filter(build => build.projects.elasticsearch != null) - .map(build => { - return { - projects: build.projects.elasticsearch, - buildId: build.build_id, - date: build.start_time, - version: build.version - } - }) - .sort((a, b) => { - const dA = new Date(a.date) - const dB = new Date(b.date) - if (dA > dB) return -1 - if (dA < dB) return 1 - return 0 - }) + log.text = 'Fetching Elasticsearch specification' + await rimraf(schemaFolder) + await mkdir(schemaFolder, { recursive: true }) - if (hash != null) { - const build = esBuilds.find(build => build.projects.commit_hash === hash) - if (!build) { - throw new Error(`Can't find any build with hash '${hash}'`) - } - const zipKey = Object.keys(build.projects.packages).find(key => key.startsWith('rest-resources-zip-') && key.endsWith('.zip')) - return { - url: build.projects.packages[zipKey].url, - id: build.buildId, - hash: build.projects.commit_hash, - version: build.version - } + const response = await fetch(`https://raw.githubusercontent.com/elastic/elasticsearch-specification/${version}/output/schema/schema.json`) + if (!response.ok) { + log.fail(`unexpected response ${response.statusText}`) + process.exit(1) } - const lastBuild = esBuilds[0] - const zipKey = Object.keys(lastBuild.projects.packages).find(key => key.startsWith('rest-resources-zip-') && key.endsWith('.zip')) - return { - url: lastBuild.projects.packages[zipKey].url, - id: lastBuild.buildId, - hash: lastBuild.projects.commit_hash, - version: lastBuild.version - } + log.text = 'Downloading schema.json' + await pipeline(response.body, createWriteStream(schemaJson)) + + log.succeed('Done') } -async function main (options) { - delete options._ - await downloadArtifacts(options) +async function main () { + await downloadArtifacts() } + if (require.main === module) { process.on('unhandledRejection', function (err) { console.error(err) process.exit(1) }) - const options = minimist(process.argv.slice(2), { - string: ['id', 'version', 'hash'] - }) - main(options).catch(t => { + main().catch(t => { console.log(t) process.exit(2) }) } module.exports = downloadArtifacts -module.exports.locations = { - specFolder, - freeTestFolder, - xPackTestFolder -} +module.exports.locations = { testYamlFolder, zipFile, schemaJson } diff --git a/scripts/generate-docs-examples.js b/scripts/generate-docs-examples.js index a9c229095..8026547c3 100644 --- a/scripts/generate-docs-examples.js +++ b/scripts/generate-docs-examples.js @@ -1,26 +1,12 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. 
licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 */ const { join } = require('path') const { writeFile } = require('fs/promises') const fetch = require('node-fetch') -const rimraf = require('rimraf') +const { rimraf } = require('rimraf') const ora = require('ora') const { convertRequests } = require('@elastic/request-converter') const minimist = require('minimist') @@ -91,6 +77,7 @@ ${source.trim()} } const options = minimist(process.argv.slice(2), { + boolean: ['debug'], string: ['version'], default: { version: 'master' @@ -102,7 +89,7 @@ generate(options.version) .catch(err => log.fail(err.message)) .finally(() => { const keys = Object.keys(failures) - if (keys.length > 0) { + if (keys.length > 0 && options.debug) { let message = 'Some examples failed to generate:\n\n' for (const key of keys) { message += `${key}: ${failures[key]}\n` diff --git a/scripts/generate.js b/scripts/generate.js deleted file mode 100644 index ad6fc71cb..000000000 --- a/scripts/generate.js +++ /dev/null @@ -1,143 +0,0 @@ -/* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -'use strict' - -const { join } = require('path') -const { readdirSync, writeFileSync, readFileSync } = require('fs') -const minimist = require('minimist') -const ora = require('ora') -const rimraf = require('rimraf') -const standard = require('standard') -const downloadArtifacts = require('./download-artifacts') -const { - generate, - genFactory, - generateDocs, - generateRequestTypes -} = require('./utils') - -start(minimist(process.argv.slice(2), { - string: ['version', 'hash'] -})) - -function start (opts) { - if (opts.version == null) { - console.error('Missing version parameter') - process.exit(1) - } - - const packageFolder = join(__dirname, '..', 'api') - const apiOutputFolder = join(packageFolder, 'api') - const mainOutputFile = join(packageFolder, 'index.js') - const docOutputFile = join(__dirname, '..', 'docs', 'reference.asciidoc') - const typeDefFile = join(__dirname, '..', 'index.d.ts') - const requestParamsOutputFile = join(packageFolder, 'requestParams.d.ts') - - let log - downloadArtifacts({ version: opts.version, hash: opts.hash }) - .then(onArtifactsDownloaded) - .catch(err => { - console.log(err) - process.exit(1) - }) - - function onArtifactsDownloaded () { - log = ora('Generating APIs').start() - - log.text = 'Cleaning API folder...' - rimraf.sync(join(apiOutputFolder, '*.js')) - - const allSpec = readdirSync(downloadArtifacts.locations.specFolder) - .filter(file => file !== '_common.json') - .filter(file => !file.includes('deprecated')) - .sort() - .map(file => require(join(downloadArtifacts.locations.specFolder, file))) - - const namespaces = namespacify(readdirSync(downloadArtifacts.locations.specFolder)) - for (const namespace in namespaces) { - if (namespace === '_common') continue - const code = generate(namespace, namespaces[namespace], downloadArtifacts.locations.specFolder, opts.version) - const filePath = join(apiOutputFolder, `${namespace}.js`) - writeFileSync(filePath, code, { encoding: 'utf8' }) - } - - writeFileSync( - requestParamsOutputFile, - generateRequestTypes(opts.version, allSpec), - { encoding: 'utf8' } - ) - - const { fn: factory, types } = genFactory(apiOutputFolder, downloadArtifacts.locations.specFolder, namespaces) - writeFileSync( - mainOutputFile, - factory, - { encoding: 'utf8' } - ) - - const oldTypeDefString = readFileSync(typeDefFile, 'utf8') - const start = oldTypeDefString.indexOf('/* GENERATED */') - const end = oldTypeDefString.indexOf('/* /GENERATED */') - const newTypeDefString = oldTypeDefString.slice(0, start + 15) + '\n' + types + '\n ' + oldTypeDefString.slice(end) - writeFileSync( - typeDefFile, - newTypeDefString, - { encoding: 'utf8' } - ) - - lintFiles(log, () => { - log.text = 'Generating documentation' - writeFileSync( - docOutputFile, - generateDocs(require(join(downloadArtifacts.locations.specFolder, '_common.json')), allSpec), - { encoding: 'utf8' } - ) - - log.succeed('Done!') - }) - } - - function lintFiles (log, cb) { - log.text = 'Linting...' 
- const files = [join(packageFolder, '*.js'), join(apiOutputFolder, '*.js')] - standard.lintFiles(files, { fix: true }, err => { - if (err) { - return log.fail(err.message) - } - cb() - }) - } - - function namespacify (apis) { - return apis - .map(api => api.slice(0, -5)) - .filter(api => api !== '_common') - .filter(api => !api.includes('deprecated')) - .reduce((acc, val) => { - if (val.includes('.')) { - val = val.split('.') - acc[val[0]] = acc[val[0]] || [] - acc[val[0]].push(val[1]) - } else { - acc[val] = [] - } - return acc - }, {}) - } -} diff --git a/scripts/kibana-docker.sh b/scripts/kibana-docker.sh deleted file mode 100755 index 8c39f9647..000000000 --- a/scripts/kibana-docker.sh +++ /dev/null @@ -1,8 +0,0 @@ -#!/bin/bash - -exec docker run \ - --rm \ - -e ELASTICSEARCH_URL="http://elasticsearch:9200/" \ - -p 5601:5601 \ - --network=elastic \ - docker.elastic.co/kibana/kibana:7.0.0-beta1 diff --git a/scripts/release-canary.js b/scripts/release-canary.js index 3afcf3983..a4bd8780f 100644 --- a/scripts/release-canary.js +++ b/scripts/release-canary.js @@ -1,3 +1,8 @@ +/* + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 + */ + 'use strict' /** @@ -130,9 +135,9 @@ release( 'dry-run', // help text - 'help', + 'help' ], - alias: { help: 'h' }, + alias: { help: 'h' } }) ) .catch(err => { diff --git a/scripts/utils/clone-es.js b/scripts/utils/clone-es.js deleted file mode 100644 index 09f078918..000000000 --- a/scripts/utils/clone-es.js +++ /dev/null @@ -1,139 +0,0 @@ -/* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -'use strict' - -const { accessSync, mkdirSync } = require('fs') -const { join } = require('path') -const Git = require('simple-git') - -const esRepo = 'https://github.com/elastic/elasticsearch.git' -const esFolder = join(__dirname, '..', '..', 'elasticsearch') -const apiFolder = join(esFolder, 'rest-api-spec', 'src', 'main', 'resources', 'rest-api-spec', 'api') -const xPackFolder = join(esFolder, 'x-pack', 'plugin', 'src', 'test', 'resources', 'rest-api-spec', 'api') - -function cloneAndCheckout (opts, callback) { - const { log, tag, branch } = opts - withTag(tag, callback) - - /** - * Sets the elasticsearch repository to the given tag. - * If the repository is not present in `esFolder` it will - * clone the repository and the checkout the tag. - * If the repository is already present but it cannot checkout to - * the given tag, it will perform a pull and then try again. 
- * @param {string} tag - * @param {function} callback - */ - function withTag (tag, callback) { - let fresh = false - let retry = 0 - - if (!pathExist(esFolder)) { - if (!createFolder(esFolder)) { - log.fail('Failed folder creation') - return - } - fresh = true - } - - const git = Git(esFolder) - - if (fresh) { - clone(checkout) - } else if (opts.branch) { - checkout(true) - } else { - checkout() - } - - function checkout (alsoPull = false) { - if (branch) { - log.text = `Checking out branch '${branch}'` - } else { - log.text = `Checking out tag '${tag}'` - } - git.checkout(branch || tag, err => { - if (err) { - if (retry++ > 0) { - callback(new Error(`Cannot checkout tag '${tag}'`), { apiFolder, xPackFolder }) - return - } - return pull(checkout) - } - if (alsoPull) { - return pull(checkout) - } - callback(null, { apiFolder, xPackFolder }) - }) - } - - function pull (cb) { - log.text = 'Pulling elasticsearch repository...' - git.pull(err => { - if (err) { - callback(err, { apiFolder, xPackFolder }) - return - } - cb() - }) - } - - function clone (cb) { - log.text = 'Cloning elasticsearch repository...' - git.clone(esRepo, esFolder, err => { - if (err) { - callback(err, { apiFolder, xPackFolder }) - return - } - cb() - }) - } - } - - /** - * Checks if the given path exists - * @param {string} path - * @returns {boolean} true if exists, false if not - */ - function pathExist (path) { - try { - accessSync(path) - return true - } catch (err) { - return false - } - } - - /** - * Creates the given folder - * @param {string} name - * @returns {boolean} true on success, false on failure - */ - function createFolder (name) { - try { - mkdirSync(name) - return true - } catch (err) { - return false - } - } -} - -module.exports = cloneAndCheckout diff --git a/scripts/utils/generateApis.js b/scripts/utils/generateApis.js deleted file mode 100644 index cb99b3701..000000000 --- a/scripts/utils/generateApis.js +++ /dev/null @@ -1,553 +0,0 @@ -/* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -/* eslint camelcase: 0 */ - -'use strict' - -const { join } = require('path') -const dedent = require('dedent') -const allowedMethods = { - noBody: ['GET', 'HEAD', 'DELETE'], - body: ['POST', 'PUT', 'DELETE'] -} - -// if a parameter is depracted in a minor release -// we should be able to support it until the next major -const deprecatedParameters = require('./patch.json') - -// list of apis that does not need any kind of validation -// because of how the url is built or the `type` handling in ES7 -const noPathValidation = [ - 'create', - 'exists', - 'explain', - 'get', - 'get_source', - 'index', - 'indices.get_alias', - 'indices.exists_alias', - 'indices.get_field_mapping', - 'indices.get_mapping', - 'indices.get_settings', - 'indices.put_mapping', - 'indices.stats', - 'delete', - 'nodes.info', - 'nodes.stats', - 'nodes.usage', - 'tasks.cancel', - 'termvectors', - 'update' -] - -function generateNamespace (namespace, nested, specFolder, version) { - const common = require(join(specFolder, '_common.json')) - let code = dedent` - /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - - 'use strict' - - /* eslint camelcase: 0 */ - /* eslint no-unused-vars: 0 */ - - const { handleError, snakeCaseKeys, normalizeArguments, kConfigurationError } = require('../utils') -` - if (nested.length > 0) { - let getters = '' - for (const n of nested) { - if (n.includes('_')) { - const nameSnaked = n - .replace(/\.([a-z])/g, k => k[1].toUpperCase()) - .replace(/_([a-z])/g, k => k[1].toUpperCase()) - getters += `${n}: { get () { return this.${nameSnaked} } },\n` - } - } - const api = generateMultiApi(version, namespace, nested, common, specFolder) - if (getters.length > 0) { - getters = `Object.defineProperties(${api.namespace}Api.prototype, {\n${getters}})` - } - - code += ` - const acceptedQuerystring = ${JSON.stringify(api.acceptedQuerystring)} - const snakeCase = ${JSON.stringify(api.snakeCase)} - - function ${api.namespace}Api (transport, ConfigurationError) { - this.transport = transport - this[kConfigurationError] = ConfigurationError - } - - ${api.code} - - ${getters} - - module.exports = ${api.namespace}Api - ` - } else { - const spec = require(join(specFolder, `${namespace}.json`)) - const api = generateSingleApi(version, spec, common) - code += ` - const acceptedQuerystring = ${JSON.stringify(api.acceptedQuerystring)} - const snakeCase = ${JSON.stringify(api.snakeCase)} - - ${api.code} - - module.exports = ${api.name}Api - ` - } - return code -} - -function generateMultiApi (version, namespace, nested, common, specFolder) { - const namespaceSnaked = namespace - .replace(/\.([a-z])/g, k => k[1].toUpperCase()) - .replace(/_([a-z])/g, k => k[1].toUpperCase()) - let code = '' - const snakeCase = {} - const acceptedQuerystring = [] - for (const n of nested) { - const nameSnaked = n - .replace(/\.([a-z])/g, k => k[1].toUpperCase()) - .replace(/_([a-z])/g, k => k[1].toUpperCase()) - const spec = require(join(specFolder, `${namespace}.${n}.json`)) - const api = generateSingleApi(version, spec, common) - code += `${Uppercase(namespaceSnaked)}Api.prototype.${nameSnaked} = ${api.code}\n\n` - Object.assign(snakeCase, api.snakeCase) - for (const q of api.acceptedQuerystring) { - if (!acceptedQuerystring.includes(q)) { - acceptedQuerystring.push(q) - } - } - } - return { code, snakeCase, acceptedQuerystring, namespace: Uppercase(namespaceSnaked) } -} - -function generateSingleApi (version, spec, common) { - const release = version.charAt(0) - const api = Object.keys(spec)[0] - const name = api - .replace(/\.([a-z])/g, k => k[1].toUpperCase()) - .replace(/_([a-z])/g, k => k[1].toUpperCase()) - - const { paths } = spec[api].url - const { params } = spec[api] - const acceptedQuerystring = [] - const required = [] - - const methods = paths.reduce((acc, val) => { - for (const method of val.methods) { - if (!acc.includes(method)) acc.push(method) - } - return acc - }, []) - const parts = paths.reduce((acc, val) => { - if (!val.parts) return acc - for (const part of Object.keys(val.parts)) { - if (!acc.includes(part)) acc.push(part) - } - return acc - }, []) - - // get the required parts from the url - // if the url has at least one static path, - // then there are not required parts of the url - let allParts = [] - for (const path of paths) { - if (path.parts) { - allParts.push(Object.keys(path.parts)) - } else { - allParts = [] - break - } - } - if (allParts.length > 0) { - intersect(...allParts).forEach(r => required.push(r)) - } - - for (const key in params) { - if (params[key].required) { - required.push(key) - } - - acceptedQuerystring.push(key) - if (deprecatedParameters[release] 
&& deprecatedParameters[release][key]) { - acceptedQuerystring.push(deprecatedParameters[release][key]) - } - } - - for (const key in spec[api]) { - const k = spec[api][key] - if (k && k.required) { - required.push(key) - } - } - if (common && common.params) { - for (const key in common.params) { - acceptedQuerystring.push(key) - } - } - - const code = ` - function ${name}Api (params, options, callback) { - ;[params, options, callback] = normalizeArguments(params, options, callback) - - ${genRequiredChecks()} - - ${genUrlValidation(paths, api)} - - let { ${genQueryDenylist(false)}, ...querystring } = params - querystring = snakeCaseKeys(acceptedQuerystring, snakeCase, querystring) - - let path = '' - ${buildPath()} - - // build request object - const request = { - method, - path, - ${genBody(api, methods, spec[api].body, spec)} - querystring - } - - return this.transport.request(request, options, callback) - } - `.trim() // always call trim to avoid newlines - - return { - name, - code, - acceptedQuerystring: acceptedQuerystring, - snakeCase: genSnakeCaseMap(), - documentation: generateDocumentation(spec[api], api) - } - - function genRequiredChecks () { - const code = required - .map(_genRequiredCheck) - .concat(_noBody()) - .filter(Boolean) - - if (code.length) { - code.unshift('// check required parameters') - } - - return code.join('\n ') - - function _genRequiredCheck (param) { - const camelCased = param[0] === '_' - ? '_' + param.slice(1).replace(/_([a-z])/g, k => k[1].toUpperCase()) - : param.replace(/_([a-z])/g, k => k[1].toUpperCase()) - - if (param === camelCased) { - const check = ` - if (params['${param}'] == null) { - const err = new this[kConfigurationError]('Missing required parameter: ${param}') - return handleError(err, callback) - } - ` - return check.trim() - } else { - const check = ` - if (params['${param}'] == null && params['${camelCased}'] == null) { - const err = new this[kConfigurationError]('Missing required parameter: ${param} or ${camelCased}') - return handleError(err, callback) - } - ` - return check.trim() - } - } - - function _noBody () { - const check = ` - if (params.body != null) { - const err = new this[kConfigurationError]('This API does not require a body') - return handleError(err, callback) - } - ` - return spec[api].body === null ? check.trim() : '' - } - } - - function genSnakeCaseMap () { - const toCamelCase = str => { - return str[0] === '_' - ? '_' + str.slice(1).replace(/_([a-z])/g, k => k[1].toUpperCase()) - : str.replace(/_([a-z])/g, k => k[1].toUpperCase()) - } - - return acceptedQuerystring.reduce((acc, val, index) => { - if (toCamelCase(val) !== val) { - acc[toCamelCase(val)] = val - } - return acc - }, {}) - } - - function genQueryDenylist (addQuotes = true) { - const toCamelCase = str => { - return str[0] === '_' - ? '_' + str.slice(1).replace(/_([a-z])/g, k => k[1].toUpperCase()) - : str.replace(/_([a-z])/g, k => k[1].toUpperCase()) - } - - const denylist = ['method', 'body'] - parts.forEach(p => { - const camelStr = toCamelCase(p) - if (camelStr !== p) denylist.push(`${camelStr}`) - denylist.push(`${p}`) - }) - return addQuotes ? denylist.map(q => `'${q}'`) : denylist - } - - function buildPath () { - const toCamelCase = str => { - return str[0] === '_' - ? '_' + str.slice(1).replace(/_([a-z])/g, k => k[1].toUpperCase()) - : str.replace(/_([a-z])/g, k => k[1].toUpperCase()) - } - - const genAccessKey = str => { - const camelStr = toCamelCase(str) - return camelStr === str - ? 
str - : `${str} || ${camelStr}` - } - - const genCheck = path => { - return path - .split('/') - .filter(Boolean) - .map(p => p.startsWith('{') ? `(${genAccessKey(p.slice(1, -1))}) != null` : false) - .filter(Boolean) - .join(' && ') - } - - const genPath = path => { - path = path - .split('/') - .filter(Boolean) - .map(p => p.startsWith('{') ? `encodeURIComponent(${genAccessKey(p.slice(1, -1))})` : `'${p}'`) - .join(' + \'/\' + ') - return path.length > 0 ? ('\'/\' + ' + path) : '\'/\'' - } - - let hasStaticPath = false - let sortedPaths = paths - // some legacy API have mutliple statis paths - // this filter removes them - .filter(p => { - if (p.path.includes('{')) return true - if (hasStaticPath === false && p.deprecated == null) { - hasStaticPath = true - return true - } - return false - }) - // sort by number of parameters (desc) - .sort((a, b) => Object.keys(b.parts || {}).length - Object.keys(a.parts || {}).length) - - const allDeprecated = paths.filter(path => path.deprecated != null) - if (allDeprecated.length === paths.length) sortedPaths = [paths[0]] - - let code = '' - for (let i = 0; i < sortedPaths.length; i++) { - const { path, methods } = sortedPaths[i] - if (sortedPaths.length === 1) { - code += `if (method == null) method = ${generatePickMethod(methods)} - path = ${genPath(path)} - ` - } else if (i === 0) { - code += `if (${genCheck(path)}) { - if (method == null) method = ${generatePickMethod(methods)} - path = ${genPath(path)} - } - ` - } else if (i === sortedPaths.length - 1) { - code += ` else { - if (method == null) method = ${generatePickMethod(methods)} - path = ${genPath(path)} - } - ` - } else { - code += ` else if (${genCheck(path)}) { - if (method == null) method = ${generatePickMethod(methods)} - path = ${genPath(path)} - } - ` - } - } - - return code - } -} - -function generatePickMethod (methods) { - if (methods.length === 1) { - return `'${methods[0]}'` - } - const bodyMethod = getBodyMethod(methods) - const noBodyMethod = getNoBodyMethod(methods) - if (bodyMethod && noBodyMethod) { - return `body == null ? '${noBodyMethod}' : '${bodyMethod}'` - } else if (bodyMethod) { - return `'${bodyMethod}'` - } else { - return `'${noBodyMethod}'` - } -} - -function genBody (api, methods, body, spec) { - const bodyMethod = getBodyMethod(methods) - const { content_type } = spec[api].headers - if (content_type && content_type.includes('application/x-ndjson')) { - return 'bulkBody: body,' - } - if (body === null && bodyMethod) { - return 'body: \'\',' - } else if (bodyMethod) { - return 'body: body || \'\',' - } else { - return 'body: null,' - } -} - -function getBodyMethod (methods) { - const m = methods.filter(m => ~allowedMethods.body.indexOf(m)) - if (m.length) return m[0] - return null -} - -function getNoBodyMethod (methods) { - const m = methods.filter(m => ~allowedMethods.noBody.indexOf(m)) - if (m.length) return m[0] - return null -} - -function genUrlValidation (paths, api) { - // this api does not need url validation - if (!needsPathValidation(api)) return '' - // gets only the dynamic components of the url in an array - // then we reverse it. A parameters always require what is - // at its right in the array. - const chunks = paths - .sort((a, b) => Object.keys(a.parts || {}).length > Object.keys(b.parts || {}).length ? -1 : 1) - .slice(0, 1) - .reduce((acc, val) => val.path, '') - // .reduce((a, b) => a.path.split('/').length > b.path.split('/').length ? 
a.path : b.path) - .split('/') - .filter(s => s.startsWith('{')) - .map(s => s.slice(1, -1)) - .reverse() - - let code = '' - - const len = chunks.length - chunks.forEach((chunk, index) => { - if (index === len - 1) return - const params = [] - let camelCased = chunk[0] === '_' - ? '_' + chunk.slice(1).replace(/_([a-z])/g, k => k[1].toUpperCase()) - : chunk.replace(/_([a-z])/g, k => k[1].toUpperCase()) - - if (chunk === camelCased) { - code += `${index ? '} else ' : ''}if (params['${chunk}'] != null && (` - } else { - code += `${index ? '} else ' : ''}if ((params['${chunk}'] != null || params['${camelCased}'] != null) && (` - } - for (let i = index + 1; i < len; i++) { - params.push(chunks[i]) - // url parts can be declared in camelCase fashion - camelCased = chunks[i][0] === '_' - ? '_' + chunks[i].slice(1).replace(/_([a-z])/g, k => k[1].toUpperCase()) - : chunks[i].replace(/_([a-z])/g, k => k[1].toUpperCase()) - - if (chunks[i] === camelCased) { - code += `params['${chunks[i]}'] == null${i === len - 1 ? '' : ' || '}` - } else { - code += `(params['${chunks[i]}'] == null && params['${camelCased}'] == null)${i === len - 1 ? '' : ' || '}` - } - } - code += `)) { - const err = new this[kConfigurationError]('Missing required parameter of the url: ${params.join(', ')}') - return handleError(err, callback) - ` - }) - - if (chunks.length > 1) { - code += '\n}' - } - - if (code.length) { - code = '// check required url components\n' + code - } - - return code.trim() -} - -function generateDocumentation ({ documentation }, op) { - // we use `replace(/\u00A0/g, ' ')` to remove no breaking spaces - // because some parts of the description fields are using it - - if (documentation == null) return '' - - let doc = '/**\n' - doc += ` * Perform a ${op} request\n` - if (documentation.description) { - doc += ` * ${documentation.description.replace(/\u00A0/g, ' ')}\n` - } - if (documentation.url) { - doc += ` * ${documentation.url}\n` - } - doc += ' */' - - return doc -} - -function needsPathValidation (api) { - return noPathValidation.indexOf(api) === -1 -} - -function intersect (first, ...rest) { - return rest.reduce((accum, current) => { - return accum.filter(x => current.indexOf(x) !== -1) - }, first) -} - -function Uppercase (str) { - return str[0].toUpperCase() + str.slice(1) -} - -module.exports = generateNamespace diff --git a/scripts/utils/generateDocs.js b/scripts/utils/generateDocs.js deleted file mode 100644 index 9f681ab49..000000000 --- a/scripts/utils/generateDocs.js +++ /dev/null @@ -1,318 +0,0 @@ -/* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -'use strict' - -const { readdirSync } = require('fs') -const { join } = require('path') -const dedent = require('dedent') - -const codeExamples = readdirSync(join(__dirname, '..', '..', 'docs', 'examples')) - .map(file => file.slice(0, -9)) - .filter(api => api !== 'index') - -function generateDocs (common, spec) { - let doc = dedent` - [[api-reference]] - - //////// - - - - =========================================================================================================================== - || || - || || - || || - || ██████╗ ███████╗ █████╗ ██████╗ ███╗ ███╗███████╗ || - || ██╔══██╗██╔════╝██╔══██╗██╔══██╗████╗ ████║██╔════╝ || - || ██████╔╝█████╗ ███████║██║ ██║██╔████╔██║█████╗ || - || ██╔══██╗██╔══╝ ██╔══██║██║ ██║██║╚██╔╝██║██╔══╝ || - || ██║ ██║███████╗██║ ██║██████╔╝██║ ╚═╝ ██║███████╗ || - || ╚═╝ ╚═╝╚══════╝╚═╝ ╚═╝╚═════╝ ╚═╝ ╚═╝╚══════╝ || - || || - || || - || This file is autogenerated, DO NOT send pull requests that changes this file directly. || - || You should update the script that does the generation, which can be found in '/scripts/utils/generateDocs.js'. || - || || - || You can run the script with the following command: || - || node scripts/generate --branch || - || or || - || node scripts/generate --tag || - || || - || || - || || - =========================================================================================================================== - - - - //////// - - == API Reference - - This document contains the entire list of the Elasticsearch API supported by the client, both OSS and commercial. The client is entirely licensed under Apache 2.0. - - Elasticsearch exposes an HTTP layer to communicate with, and the client is a library that will help you do this. Because of this reason, you will see HTTP related parameters, such as ${'`'}body${'`'} or ${'`'}headers${'`'}. - - Every API can accept two objects, the first contains all the parameters that will be sent to Elasticsearch, while the second includes the request specific parameters, such as timeouts, headers, and so on. - In the first object, every parameter but the body will be sent via querystring or url parameter, depending on the API, and every unrecognized parameter will be sent as querystring. - - [source,js] - ---- - // promise API - const result = await client.search({ - index: 'my-index', - from: 20, - size: 10, - body: { foo: 'bar' } - }, { - ignore: [404], - maxRetries: 3 - }) - - // callback API - client.search({ - index: 'my-index', - from: 20, - size: 10, - body: { foo: 'bar' } - }, { - ignore: [404], - maxRetries: 3 - }, (err, result) => { - if (err) console.log(err) - }) - ---- - - In this document, you will find the reference of every parameter accepted by the querystring or the url. If you also need to send the body, you can find the documentation of its format in the reference link that is present along with every endpoint. - - \n\n` - doc += commonParameters(common) - spec.forEach(s => { - doc += '\n' + generateApiDoc(s) - }) - return doc -} - -function commonParameters (spec) { - let doc = dedent` - [discrete] - === Common parameters - Parameters that are accepted by all API endpoints. - - link:{ref}/common-options.html[Documentation] - [cols=2*] - |===\n` - Object.keys(spec.params).forEach(key => { - const name = isSnakeCased(key) && key !== camelify(key) - ? 
'`' + key + '` or `' + camelify(key) + '`' - : '`' + key + '`' - - doc += dedent` - |${name} - |${'`' + spec.params[key].type + '`'} - ${spec.params[key].description}` - if (spec.params[key].default) { - doc += ` + - _Default:_ ${'`' + spec.params[key].default + '`'}` - } - doc += '\n\n' - }) - - doc += dedent` - |=== - ` - return doc -} - -function generateApiDoc (spec) { - const name = Object.keys(spec)[0] - const documentationUrl = spec[name].documentation && spec[name].documentation.url - ? fixLink(name, spec[name].documentation.url) - : '' - const params = [] - // url params - const urlParts = spec[name].url.paths.reduce((acc, path) => { - if (!path.parts) return acc - for (const part in path.parts) { - if (acc[part] != null) continue - acc[part] = path.parts[part] - } - return acc - }, {}) - if (urlParts) { - Object.keys(urlParts).forEach(param => { - params.push({ - name: param, - type: getType(urlParts[param].type, urlParts[param].options), - description: urlParts[param].description, - default: urlParts[param].default, - deprecated: !!urlParts[param].deprecated - }) - }) - } - - // query params - const urlParams = spec[name].params - if (urlParams) { - Object.keys(urlParams).forEach(param => { - const duplicate = params.find(ele => ele.name === param) - if (duplicate) return - params.push({ - name: param, - type: getType(urlParams[param].type, urlParams[param].options), - description: urlParams[param].description, - default: urlParams[param].default, - deprecated: !!urlParams[param].deprecated - }) - }) - } - - // body params - const body = spec[name].body - if (body) { - params.push({ - name: 'body', - type: 'object', - description: body.description, - default: body.default, - deprecated: !!body.deprecated - }) - } - - const codeParameters = params - .reduce((acc, val) => { - const code = `${val.name}: ${val.type},` - acc += acc === '' - ? code - : '\n ' + code - - return acc - }, '') - // remove last comma - .slice(0, -1) - - const stability = spec[name].stability === 'stable' - ? '' - : `*Stability:* ${spec[name].stability}` - - let doc = dedent` - [discrete] - === ${camelify(name)} - ${stability} - [source,ts] - ---- - client.${camelify(name)}(${codeParameters.length > 0 ? `{\n ${codeParameters}\n}` : ''}) - ----\n` - if (documentationUrl) { - doc += `link:${documentationUrl}[Documentation] +\n` - } - if (codeExamples.includes(name)) { - doc += `{jsclient}/${name.replace(/\./g, '_')}_examples.html[Code Example] +\n` - } - - if (params.length !== 0) { - doc += dedent`[cols=2*] - |===\n` - doc += params.reduce((acc, val) => { - const name = isSnakeCased(val.name) && val.name !== camelify(val.name) - ? '`' + val.name + '` or `' + camelify(val.name) + '`' - : '`' + val.name + '`' - acc += dedent` - |${name} - |${'`' + val.type.replace(/\|/g, '\\|') + '`'} - ${val.description}` - if (val.default) { - acc += ` +\n_Default:_ ${'`' + val.default + '`'}` - } - if (val.deprecated) { - acc += ' +\n\nWARNING: This parameter has been deprecated.' 
- } - return acc + '\n\n' - }, '') - - doc += dedent` - |=== - ` - } - doc += '\n' - return doc -} - -const LINK_OVERRIDES = { - 'license.delete': '{ref}/delete-license.html', - 'license.get': '{ref}/get-license.html', - 'license.get_basic_status': '{ref}/get-basic-status.html', - 'license.get_trial_status': '{ref}/get-trial-status.html', - 'license.post': '{ref}/update-license.html', - 'license.post_start_basic': '{ref}/start-basic.html', - 'license.post_start_trial': '{ref}/start-trial.html', - 'migration.deprecations': '{ref}/migration-api-deprecation.html', - 'monitoring.bulk': '{ref}/monitor-elasticsearch-cluster.html', - 'ingest.delete_pipeline': '{ref}/delete-pipeline-api.html', - 'ingest.get_pipeline': '{ref}/get-pipeline-api.html', - 'ingest.put_pipeline': '{ref}/put-pipeline-api.html', - 'ingest.simulate': '{ref}/simulate-pipeline-api.html', - 'ingest.processor_grok': '{ref}/grok-processor.html#grok-processor-rest-get' -} -// Fixes bad urls in the JSON spec -function fixLink (name, str) { - /* In 6.x some API start with `xpack.` when in master they do not. We - * can safely ignore that for link generation. */ - name = name.replace(/^xpack\./, '') - const override = LINK_OVERRIDES[name] - if (override) return override - if (!str) return '' - /* Replace references to the guide with the attribute {ref} because - * the json files in the Elasticsearch repo are a bit of a mess. */ - str = str.replace(/^.+guide\/en\/elasticsearch\/reference\/[^/]+\/([^./]*\.html(?:#.+)?)$/, '{ref}/$1') - str = str.replace(/frozen\.html/, 'freeze-index-api.html') - str = str.replace(/ml-file-structure\.html/, 'ml-find-file-structure.html') - str = str.replace(/security-api-get-user-privileges\.html/, 'security-api-get-privileges.html') - - return str -} - -function getType (type, options) { - switch (type) { - case 'list': - return 'string | string[]' - case 'date': - case 'time': - case 'timeout': - return 'string' - case 'enum': - return options.map(k => `'${k}'`).join(' | ') - case 'int': - case 'double': - case 'long': - return 'number' - default: - return type - } -} - -function camelify (str) { - return str[0] === '_' - ? '_' + str.slice(1).replace(/_([a-z])/g, k => k[1].toUpperCase()) - : str.replace(/_([a-z])/g, k => k[1].toUpperCase()) -} - -function isSnakeCased (str) { - return !!~str.indexOf('_') -} - -module.exports = generateDocs diff --git a/scripts/utils/generateMain.js b/scripts/utils/generateMain.js deleted file mode 100644 index 02a4873ed..000000000 --- a/scripts/utils/generateMain.js +++ /dev/null @@ -1,299 +0,0 @@ -/* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -/* eslint-disable no-template-curly-in-string */ -/* eslint camelcase: 0 */ - -'use strict' - -const { readdirSync } = require('fs') -const { join } = require('path') -const dedent = require('dedent') -const deepmerge = require('deepmerge') - -function genFactory (folder, specFolder, namespaces) { - // get all the API files - // const apiFiles = readdirSync(folder) - const apiFiles = readdirSync(specFolder) - .filter(file => file !== '_common.json') - .filter(file => !file.includes('deprecated')) - .sort() - const types = apiFiles - .map(file => { - const name = file - .slice(0, -5) - .replace(/\.([a-z])/g, k => k[1].toUpperCase()) - .replace(/_([a-z])/g, k => k[1].toUpperCase()) - - return file - .slice(0, -5) // remove `.json` extension - .split('.') - .reverse() - .reduce((acc, val) => { - const spec = readSpec(specFolder, file.slice(0, -5)) - const isHead = isHeadMethod(spec, file.slice(0, -5)) - const body = hasBody(spec, file.slice(0, -5)) - const methods = acc === null ? buildMethodDefinition({ kibana: false }, val, name, body, isHead, spec) : null - const obj = {} - if (methods) { - for (const m of methods) { - obj[m.key] = m.val - } - } else { - obj[val] = acc - if (isSnakeCased(val)) { - obj[camelify(val)] = acc - } - } - return obj - }, null) - }) - .reduce((acc, val) => deepmerge(acc, val), {}) - - const kibanaTypes = apiFiles - .map(file => { - const name = file - .slice(0, -5) - .replace(/\.([a-z])/g, k => k[1].toUpperCase()) - .replace(/_([a-z])/g, k => k[1].toUpperCase()) - - return file - .slice(0, -5) // remove `.json` extension - .split('.') - .reverse() - .reduce((acc, val) => { - const spec = readSpec(specFolder, file.slice(0, -5)) - const isHead = isHeadMethod(spec, file.slice(0, -5)) - const body = hasBody(spec, file.slice(0, -5)) - const methods = acc === null ? 
buildMethodDefinition({ kibana: true }, val, name, body, isHead, spec) : null - const obj = {} - if (methods) { - for (const m of methods) { - obj[m.key] = m.val - } - } else { - obj[camelify(val)] = acc - } - return obj - }, null) - }) - .reduce((acc, val) => deepmerge(acc, val), {}) - - // serialize the type object - const typesStr = Object.keys(types) - .map(key => { - const line = ` ${key}: ${JSON.stringify(types[key], null, 4)}` - if (line.slice(-1) === '}') { - return line.slice(0, -1) + ' }' - } - return line - }) - .join('\n') - // remove useless quotes and commas - .replace(/"/g, '') - .replace(/,$/gm, '') - const kibanaTypesStr = Object.keys(kibanaTypes) - .map(key => { - const line = ` ${key}: ${JSON.stringify(kibanaTypes[key], null, 4)}` - if (line.slice(-1) === '}') { - return line.slice(0, -1) + ' }' - } - return line - }) - .join('\n') - // remove useless quotes and commas - .replace(/"/g, '') - .replace(/,$/gm, '') - - let apisStr = '' - const getters = [] - for (const namespace in namespaces) { - if (namespaces[namespace].length > 0) { - getters.push(`${camelify(namespace)}: { - get () { - if (this[k${toPascalCase(camelify(namespace))}] === null) { - this[k${toPascalCase(camelify(namespace))}] = new ${toPascalCase(camelify(namespace))}Api(this.transport, this[kConfigurationError]) - } - return this[k${toPascalCase(camelify(namespace))}] - } - },\n`) - if (namespace.includes('_')) { - getters.push(`${namespace}: { get () { return this.${camelify(namespace)} } },\n`) - } - } else { - apisStr += `ESAPI.prototype.${camelify(namespace)} = ${camelify(namespace)}Api\n` - if (namespace.includes('_')) { - getters.push(`${namespace}: { get () { return this.${camelify(namespace)} } },\n`) - } - } - } - - apisStr += '\nObject.defineProperties(ESAPI.prototype, {\n' - for (const getter of getters) { - apisStr += getter - } - apisStr += '})' - - let modules = '' - let symbols = '' - let symbolsInstance = '' - for (const namespace in namespaces) { - if (namespaces[namespace].length > 0) { - modules += `const ${toPascalCase(camelify(namespace))}Api = require('./api/${namespace}')\n` - symbols += `const k${toPascalCase(camelify(namespace))} = Symbol('${toPascalCase(camelify(namespace))}')\n` - symbolsInstance += `this[k${toPascalCase(camelify(namespace))}] = null\n` - } else { - modules += `const ${camelify(namespace)}Api = require('./api/${namespace}')\n` - } - } - - const fn = dedent` - /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - - 'use strict' - - ${modules} - - const { kConfigurationError } = require('./utils') - ${symbols} - - function ESAPI (opts) { - this[kConfigurationError] = opts.ConfigurationError - ${symbolsInstance} - } - - ${apisStr} - - module.exports = ESAPI - ` - - // new line at the end of file - return { fn: fn + '\n', types: typesStr, kibanaTypes: kibanaTypesStr } -} - -// from snake_case to camelCase -function camelify (str) { - return str.replace(/_([a-z])/g, k => k[1].toUpperCase()) -} - -function isSnakeCased (str) { - return !!~str.indexOf('_') -} - -function toPascalCase (str) { - return str[0].toUpperCase() + str.slice(1) -} - -function buildMethodDefinition (opts, api, name, hasBody, isHead, spec) { - const Name = toPascalCase(name) - const { content_type } = spec[Object.keys(spec)[0]].headers - const bodyType = content_type && content_type.includes('application/x-ndjson') ? 'RequestNDBody' : 'RequestBody' - const responseType = isHead ? 'boolean' : 'Record' - const defaultBodyType = content_type && content_type.includes('application/x-ndjson') ? 'Record[]' : 'Record' - - if (opts.kibana) { - if (hasBody) { - return [ - { key: `${camelify(api)}(params?: RequestParams.${Name}, options?: TransportRequestOptions)`, val: 'TransportRequestPromise>' } - ] - } else { - return [ - { key: `${camelify(api)}(params?: RequestParams.${Name}, options?: TransportRequestOptions)`, val: 'TransportRequestPromise>' } - ] - } - } - - if (hasBody) { - let methods = [ - { key: `${api}(params?: RequestParams.${Name}, options?: TransportRequestOptions)`, val: 'TransportRequestPromise>' }, - { key: `${api}(callback: callbackFn)`, val: 'TransportRequestCallback' }, - { key: `${api}(params: RequestParams.${Name}, callback: callbackFn)`, val: 'TransportRequestCallback' }, - { key: `${api}(params: RequestParams.${Name}, options: TransportRequestOptions, callback: callbackFn)`, val: 'TransportRequestCallback' } - ] - if (isSnakeCased(api)) { - methods = methods.concat([ - { key: `${camelify(api)}(params?: RequestParams.${Name}, options?: TransportRequestOptions)`, val: 'TransportRequestPromise>' }, - { key: `${camelify(api)}(callback: callbackFn)`, val: 'TransportRequestCallback' }, - { key: `${camelify(api)}(params: RequestParams.${Name}, callback: callbackFn)`, val: 'TransportRequestCallback' }, - { key: `${camelify(api)}(params: RequestParams.${Name}, options: TransportRequestOptions, callback: callbackFn)`, val: 'TransportRequestCallback' } - ]) - } - return methods - } else { - let methods = [ - { key: `${api}(params?: RequestParams.${Name}, options?: TransportRequestOptions)`, val: 'TransportRequestPromise>' }, - { key: `${api}(callback: callbackFn)`, val: 'TransportRequestCallback' }, - { key: `${api}(params: RequestParams.${Name}, callback: callbackFn)`, val: 'TransportRequestCallback' }, - { key: `${api}(params: RequestParams.${Name}, options: TransportRequestOptions, callback: callbackFn)`, val: 'TransportRequestCallback' } - ] - if (isSnakeCased(api)) { - methods = methods.concat([ - { key: `${camelify(api)}(params?: RequestParams.${Name}, options?: TransportRequestOptions)`, val: 'TransportRequestPromise>' }, - { key: `${camelify(api)}(callback: callbackFn)`, val: 'TransportRequestCallback' }, - { key: `${camelify(api)}(params: RequestParams.${Name}, callback: callbackFn)`, val: 'TransportRequestCallback' }, - { key: `${camelify(api)}(params: RequestParams.${Name}, options: TransportRequestOptions, callback: callbackFn)`, val: 'TransportRequestCallback' } - ]) - } - return methods - } -} - -function 
hasBody (spec, api) { - return !!spec[api].body -} - -function isHeadMethod (spec, api) { - const { paths } = spec[api].url - const methods = [] - for (const path of paths) { - for (const method of path.methods) { - if (!methods.includes(method)) { - methods.push(method) - } - } - } - return methods.length === 1 && methods[0] === 'HEAD' -} - -function readSpec (specFolder, file) { - try { - return require(join(specFolder, file)) - } catch (err) { - throw new Error(`Cannot read spec file ${file}`) - } -} - -module.exports = genFactory diff --git a/scripts/utils/generateRequestTypes.js b/scripts/utils/generateRequestTypes.js deleted file mode 100644 index 854e9ebcf..000000000 --- a/scripts/utils/generateRequestTypes.js +++ /dev/null @@ -1,191 +0,0 @@ -/* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -/* eslint camelcase: 0 */ - -'use strict' - -const deprecatedParameters = require('./patch.json') - -function generate (version, api) { - const release = version.charAt(0) - let types = `/* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -import { RequestBody, RequestNDBody } from '../lib/Transport' - -export interface Generic { - method?: string; - filter_path?: string | string[]; - pretty?: boolean; - human?: boolean; - error_trace?: boolean; - source?: string; -} -` - - api.forEach(generateRequestType) - return types - - function generateRequestType (spec) { - const api = Object.keys(spec)[0] - const name = api - .replace(/\.([a-z])/g, k => k[1].toUpperCase()) - .replace(/_([a-z])/g, k => k[1].toUpperCase()) - - const { paths = {} } = spec[api].url - const { body, params = {} } = spec[api] - - // get the required parts from the url - // if the url has at least one static path, - // then there are not required parts of the url - let allParts = [] - let requiredParts = [] - for (const path of paths) { - if (path.parts) { - allParts.push(Object.keys(path.parts)) - } else { - allParts = [] - break - } - } - if (allParts.length > 0) { - requiredParts = intersect(...allParts) - } - - const parts = paths.reduce((acc, path) => { - if (!path.parts) return acc - for (const part in path.parts) { - if (acc[part] != null) continue - acc[part] = { key: part, value: path.parts[part], required: requiredParts.includes(part) } - } - return acc - }, {}) - const deprecatedParametersToAdd = [] - const paramsArr = Object.keys(params) - .filter(k => !Object.keys(parts).includes(k)) - .map(k => { - if (deprecatedParameters[release] && deprecatedParameters[release][k]) { - deprecatedParametersToAdd.push({ - key: deprecatedParameters[release][k], - value: params[k], - required: params[k].required - }) - } - return { key: k, value: params[k], required: params[k].required } - }) - - const partsArr = Object.keys(parts).map(k => parts[k]) - deprecatedParametersToAdd.forEach(k => partsArr.push(k)) - - const genLine = e => { - const optional = e.required ? '' : '?' - return `${e.key}${optional}: ${getType(e.value.type, e.value.options)};` - } - - const { content_type } = spec[api].headers - const bodyGeneric = content_type && content_type.includes('application/x-ndjson') ? 'RequestNDBody' : 'RequestBody' - - const code = ` -export interface ${toPascalCase(name)}${body ? `` : ''} extends Generic { - ${partsArr.map(genLine).join('\n ')} - ${paramsArr.map(genLine).join('\n ')} - ${body ? `body${body.required ? 
'' : '?'}: T;` : ''} -} -` - - types += '\n' - // remove empty lines - types += code.replace(/^\s*\n/gm, '') - } - - function getType (type, options) { - switch (type) { - case 'list': - return 'string | string[]' - case 'date': - case 'time': - case 'timeout': - return 'string' - case 'enum': { - // the following code changes 'true' | 'false' to boolean - let foundTrue = false - let foundFalse = false - options = options - .map(k => { - if (k === 'true') { - foundTrue = true - return true - } else if (k === 'false') { - foundFalse = true - return false - } else { - return `'${k}'` - } - }) - .filter(k => { - if (foundTrue && foundFalse && (k === true || k === false)) { - return false - } - return true - }) - if (foundTrue && foundFalse) { - options.push('boolean') - } - return options.join(' | ') - } - case 'int': - case 'double': - case 'long': - return 'number' - case 'boolean|long': - return 'boolean | number' - default: - return type - } - } -} - -function intersect (first, ...rest) { - return rest.reduce((accum, current) => { - return accum.filter(x => current.indexOf(x) !== -1) - }, first) -} - -function toPascalCase (str) { - return str[0].toUpperCase() + str.slice(1) -} - -module.exports = generate diff --git a/scripts/utils/index.js b/scripts/utils/index.js deleted file mode 100644 index 05d955b2e..000000000 --- a/scripts/utils/index.js +++ /dev/null @@ -1,34 +0,0 @@ -/* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -'use strict' - -const generate = require('./generateApis') -const cloneAndCheckout = require('./clone-es') -const genFactory = require('./generateMain') -const generateDocs = require('./generateDocs') -const generateRequestTypes = require('./generateRequestTypes') - -module.exports = { - generate, - cloneAndCheckout, - genFactory, - generateDocs, - generateRequestTypes -} diff --git a/scripts/utils/patch.json b/scripts/utils/patch.json deleted file mode 100644 index 3023a6271..000000000 --- a/scripts/utils/patch.json +++ /dev/null @@ -1,14 +0,0 @@ -{ - "6": { - "_source_includes": "_source_include", - "_source_excludes": "_source_exclude" - }, - "7": { - "_source_includes": "_source_include", - "_source_excludes": "_source_exclude" - }, - "8": { - "_source_includes": "_source_include", - "_source_excludes": "_source_exclude" - } -} diff --git a/scripts/wait-cluster.sh b/scripts/wait-cluster.sh deleted file mode 100755 index 4cacaa4b6..000000000 --- a/scripts/wait-cluster.sh +++ /dev/null @@ -1,21 +0,0 @@ -#!/bin/bash - -TEST_ES_SERVER=${TEST_ES_SERVER:-"http://localhost:9200/"} - -attempt_counter=0 -max_attempts=5 -url="${TEST_ES_SERVER}/_cluster/health?wait_for_status=green&timeout=50s" - -echo "Waiting for Elasticsearch..." 
-while [[ "$(curl -s -o /dev/null -w ''%{http_code}'' --max-time 55 "$url")" != "200" ]]; do - if [ ${attempt_counter} -eq ${max_attempts} ];then - echo "\nCouldn't connect to Elasticsearch" - exit 1 - fi - - printf '.' - attempt_counter=$(($attempt_counter+1)) - sleep 5 -done - -echo "\nReady" diff --git a/src/api/api/async_search.ts b/src/api/api/async_search.ts index b3dd631c1..4e9a0cf16 100644 --- a/src/api/api/async_search.ts +++ b/src/api/api/async_search.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 */ /* eslint-disable import/export */ @@ -35,23 +21,147 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -interface That { transport: Transport } + +interface That { + transport: Transport + acceptedParams: Record +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] export default class AsyncSearch { transport: Transport + acceptedParams: Record constructor (transport: Transport) { this.transport = transport + this.acceptedParams = { + 'async_search.delete': { + path: [ + 'id' + ], + body: [], + query: [] + }, + 'async_search.get': { + path: [ + 'id' + ], + body: [], + query: [ + 'keep_alive', + 'typed_keys', + 'wait_for_completion_timeout' + ] + }, + 'async_search.status': { + path: [ + 'id' + ], + body: [], + query: [ + 'keep_alive' + ] + }, + 'async_search.submit': { + path: [ + 'index' + ], + body: [ + 'aggregations', + 'aggs', + 'collapse', + 'explain', + 'ext', + 'from', + 'highlight', + 'track_total_hits', + 'indices_boost', + 'docvalue_fields', + 'knn', + 'min_score', + 'post_filter', + 'profile', + 'query', + 'rescore', + 'script_fields', + 'search_after', + 'size', + 'slice', + 'sort', + '_source', + 'fields', + 'suggest', + 'terminate_after', + 'timeout', + 'track_scores', + 'version', + 'seq_no_primary_term', + 'stored_fields', + 'pit', + 'runtime_mappings', + 'stats' + ], + query: [ + 'wait_for_completion_timeout', + 'keep_alive', + 'keep_on_completion', + 'allow_no_indices', + 'allow_partial_search_results', + 'analyzer', + 'analyze_wildcard', + 'batched_reduce_size', + 'ccs_minimize_roundtrips', + 'default_operator', + 'df', + 'docvalue_fields', + 'expand_wildcards', + 'explain', + 'ignore_throttled', + 'ignore_unavailable', + 'lenient', + 'max_concurrent_shard_requests', + 'preference', + 'request_cache', + 'routing', + 'search_type', + 'stats', + 'stored_fields', + 'suggest_field', + 'suggest_mode', + 'suggest_size', + 'suggest_text', + 'terminate_after', + 'timeout', + 'track_total_hits', + 'track_scores', + 'typed_keys', + 'rest_total_hits_as_int', + 'version', + '_source', + '_source_excludes', + '_source_includes', + 'seq_no_primary_term', + 'q', + 
'size', + 'from', + 'sort' + ] + } + } } /** * Delete an async search. If the asynchronous search is still running, it is cancelled. Otherwise, the saved search results are deleted. If the Elasticsearch security features are enabled, the deletion of a specific async search is restricted to: the authenticated user that submitted the original search request; users that have the `cancel_task` cluster privilege. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-async-search-submit | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-async-search-submit | Elasticsearch API documentation} */ async delete (this: That, params: T.AsyncSearchDeleteRequest, options?: TransportRequestOptionsWithOutMeta): Promise async delete (this: That, params: T.AsyncSearchDeleteRequest, options?: TransportRequestOptionsWithMeta): Promise> async delete (this: That, params: T.AsyncSearchDeleteRequest, options?: TransportRequestOptions): Promise async delete (this: That, params: T.AsyncSearchDeleteRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id'] + const { + path: acceptedPath + } = this.acceptedParams['async_search.delete'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -87,13 +197,16 @@ export default class AsyncSearch { /** * Get async search results. Retrieve the results of a previously submitted asynchronous search request. If the Elasticsearch security features are enabled, access to the results of a specific async search is restricted to the user or API key that submitted it. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-async-search-submit | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-async-search-submit | Elasticsearch API documentation} */ async get> (this: That, params: T.AsyncSearchGetRequest, options?: TransportRequestOptionsWithOutMeta): Promise> async get> (this: That, params: T.AsyncSearchGetRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>> async get> (this: That, params: T.AsyncSearchGetRequest, options?: TransportRequestOptions): Promise> async get> (this: That, params: T.AsyncSearchGetRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id'] + const { + path: acceptedPath + } = this.acceptedParams['async_search.get'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -129,13 +242,16 @@ export default class AsyncSearch { /** * Get the async search status. Get the status of a previously submitted async search request given its identifier, without retrieving search results. If the Elasticsearch security features are enabled, the access to the status of a specific async search is restricted to: * The user or API key that submitted the original async search request. * Users that have the `monitor` cluster privilege or greater privileges. 
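The `acceptedParams` tables above drive how the client splits a flat request object: keys listed under `query` (for example `wait_for_completion_timeout`) end up in the querystring, while search-body keys such as `query` or `sort` are serialized into the request body. A minimal usage sketch of the async search lifecycle follows, assuming a reachable cluster at a placeholder URL and a hypothetical `my-index` index:

```
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' }) // placeholder connection details

async function run (): Promise<void> {
  // Kick off a search that may keep running after the initial wait elapses.
  const submitted = await client.asyncSearch.submit({
    index: 'my-index',
    query: { match_all: {} },
    wait_for_completion_timeout: '1s',
    keep_on_completion: true
  })
  const id = submitted.id as string

  // Check progress without pulling results, then fetch them and clean up.
  const status = await client.asyncSearch.status({ id })
  if (!status.is_running) {
    const results = await client.asyncSearch.get({ id })
    console.log(results.response.hits.hits)
  }
  await client.asyncSearch.delete({ id })
}

run().catch(console.error)
```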
- * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-async-search-submit | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-async-search-submit | Elasticsearch API documentation} */ async status (this: That, params: T.AsyncSearchStatusRequest, options?: TransportRequestOptionsWithOutMeta): Promise async status (this: That, params: T.AsyncSearchStatusRequest, options?: TransportRequestOptionsWithMeta): Promise> async status (this: That, params: T.AsyncSearchStatusRequest, options?: TransportRequestOptions): Promise async status (this: That, params: T.AsyncSearchStatusRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id'] + const { + path: acceptedPath + } = this.acceptedParams['async_search.status'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -171,14 +287,18 @@ export default class AsyncSearch { /** * Run an async search. When the primary sort of the results is an indexed field, shards get sorted based on minimum and maximum value that they hold for that field. Partial results become available following the sort criteria that was requested. Warning: Asynchronous search does not support scroll or search requests that include only the suggest section. By default, Elasticsearch does not allow you to store an async search response larger than 10Mb and an attempt to do this results in an error. The maximum allowed size for a stored async search response can be set by changing the `search.max_async_search_response_size` cluster level setting. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-async-search-submit | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-async-search-submit | Elasticsearch API documentation} */ async submit> (this: That, params?: T.AsyncSearchSubmitRequest, options?: TransportRequestOptionsWithOutMeta): Promise> async submit> (this: That, params?: T.AsyncSearchSubmitRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>> async submit> (this: That, params?: T.AsyncSearchSubmitRequest, options?: TransportRequestOptions): Promise> async submit> (this: That, params?: T.AsyncSearchSubmitRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] - const acceptedBody: string[] = ['aggregations', 'aggs', 'collapse', 'explain', 'ext', 'from', 'highlight', 'track_total_hits', 'indices_boost', 'docvalue_fields', 'knn', 'min_score', 'post_filter', 'profile', 'query', 'rescore', 'script_fields', 'search_after', 'size', 'slice', 'sort', '_source', 'fields', 'suggest', 'terminate_after', 'timeout', 'track_scores', 'version', 'seq_no_primary_term', 'stored_fields', 'pit', 'runtime_mappings', 'stats'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['async_search.submit'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -205,8 +325,14 @@ export default class AsyncSearch { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? 
{} + // @ts-expect-error + body[key] = params[key] + } } } diff --git a/src/api/api/autoscaling.ts b/src/api/api/autoscaling.ts index 7f123c5a2..f6036e466 100644 --- a/src/api/api/autoscaling.ts +++ b/src/api/api/autoscaling.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 */ /* eslint-disable import/export */ @@ -35,23 +21,73 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -interface That { transport: Transport } + +interface That { + transport: Transport + acceptedParams: Record +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] export default class Autoscaling { transport: Transport + acceptedParams: Record constructor (transport: Transport) { this.transport = transport + this.acceptedParams = { + 'autoscaling.delete_autoscaling_policy': { + path: [ + 'name' + ], + body: [], + query: [ + 'master_timeout', + 'timeout' + ] + }, + 'autoscaling.get_autoscaling_capacity': { + path: [], + body: [], + query: [ + 'master_timeout' + ] + }, + 'autoscaling.get_autoscaling_policy': { + path: [ + 'name' + ], + body: [], + query: [ + 'master_timeout' + ] + }, + 'autoscaling.put_autoscaling_policy': { + path: [ + 'name' + ], + body: [ + 'policy' + ], + query: [ + 'master_timeout', + 'timeout' + ] + } + } } /** * Delete an autoscaling policy. NOTE: This feature is designed for indirect use by Elasticsearch Service, Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not supported. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-autoscaling-delete-autoscaling-policy | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-autoscaling-delete-autoscaling-policy | Elasticsearch API documentation} */ async deleteAutoscalingPolicy (this: That, params: T.AutoscalingDeleteAutoscalingPolicyRequest, options?: TransportRequestOptionsWithOutMeta): Promise async deleteAutoscalingPolicy (this: That, params: T.AutoscalingDeleteAutoscalingPolicyRequest, options?: TransportRequestOptionsWithMeta): Promise> async deleteAutoscalingPolicy (this: That, params: T.AutoscalingDeleteAutoscalingPolicyRequest, options?: TransportRequestOptions): Promise async deleteAutoscalingPolicy (this: That, params: T.AutoscalingDeleteAutoscalingPolicyRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] + const { + path: acceptedPath + } = this.acceptedParams['autoscaling.delete_autoscaling_policy'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -87,13 +123,16 @@ export default class Autoscaling { /** * Get the autoscaling capacity. NOTE: This feature is designed for indirect use by Elasticsearch Service, Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not supported. This API gets the current autoscaling capacity based on the configured autoscaling policy. It will return information to size the cluster appropriately to the current workload. The `required_capacity` is calculated as the maximum of the `required_capacity` result of all individual deciders that are enabled for the policy. The operator should verify that the `current_nodes` match the operator’s knowledge of the cluster to avoid making autoscaling decisions based on stale or incomplete information. The response contains decider-specific information you can use to diagnose how and why autoscaling determined a certain capacity was required. This information is provided for diagnosis only. Do not use this information to make autoscaling decisions. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-autoscaling-get-autoscaling-capacity | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-autoscaling-get-autoscaling-capacity | Elasticsearch API documentation} */ async getAutoscalingCapacity (this: That, params?: T.AutoscalingGetAutoscalingCapacityRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getAutoscalingCapacity (this: That, params?: T.AutoscalingGetAutoscalingCapacityRequest, options?: TransportRequestOptionsWithMeta): Promise> async getAutoscalingCapacity (this: That, params?: T.AutoscalingGetAutoscalingCapacityRequest, options?: TransportRequestOptions): Promise async getAutoscalingCapacity (this: That, params?: T.AutoscalingGetAutoscalingCapacityRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] + const { + path: acceptedPath + } = this.acceptedParams['autoscaling.get_autoscaling_capacity'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -127,13 +166,16 @@ export default class Autoscaling { /** * Get an autoscaling policy. NOTE: This feature is designed for indirect use by Elasticsearch Service, Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not supported. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-autoscaling-get-autoscaling-capacity | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-autoscaling-get-autoscaling-capacity | Elasticsearch API documentation} */ async getAutoscalingPolicy (this: That, params: T.AutoscalingGetAutoscalingPolicyRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getAutoscalingPolicy (this: That, params: T.AutoscalingGetAutoscalingPolicyRequest, options?: TransportRequestOptionsWithMeta): Promise> async getAutoscalingPolicy (this: That, params: T.AutoscalingGetAutoscalingPolicyRequest, options?: TransportRequestOptions): Promise async getAutoscalingPolicy (this: That, params: T.AutoscalingGetAutoscalingPolicyRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] + const { + path: acceptedPath + } = this.acceptedParams['autoscaling.get_autoscaling_policy'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -169,14 +211,18 @@ export default class Autoscaling { /** * Create or update an autoscaling policy. NOTE: This feature is designed for indirect use by Elasticsearch Service, Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not supported. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-autoscaling-put-autoscaling-policy | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-autoscaling-put-autoscaling-policy | Elasticsearch API documentation} */ async putAutoscalingPolicy (this: That, params: T.AutoscalingPutAutoscalingPolicyRequest, options?: TransportRequestOptionsWithOutMeta): Promise async putAutoscalingPolicy (this: That, params: T.AutoscalingPutAutoscalingPolicyRequest, options?: TransportRequestOptionsWithMeta): Promise> async putAutoscalingPolicy (this: That, params: T.AutoscalingPutAutoscalingPolicyRequest, options?: TransportRequestOptions): Promise async putAutoscalingPolicy (this: That, params: T.AutoscalingPutAutoscalingPolicyRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] - const acceptedBody: string[] = ['policy'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['autoscaling.put_autoscaling_policy'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -188,8 +234,14 @@ export default class Autoscaling { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } diff --git a/src/api/api/bulk.ts b/src/api/api/bulk.ts index ccdedfcb2..55da8bdcd 100644 --- a/src/api/api/bulk.ts +++ b/src/api/api/bulk.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. 
and contributors + * SPDX-License-Identifier: Apache-2.0 */ /* eslint-disable import/export */ @@ -35,18 +21,52 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -interface That { transport: Transport } + +interface That { + transport: Transport +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] + +const acceptedParams: Record = { + bulk: { + path: [ + 'index' + ], + body: [ + 'operations' + ], + query: [ + 'include_source_on_error', + 'list_executed_pipelines', + 'pipeline', + 'refresh', + 'routing', + '_source', + '_source_excludes', + '_source_includes', + 'timeout', + 'wait_for_active_shards', + 'require_alias', + 'require_data_stream' + ] + } +} /** * Bulk index or delete documents. Perform multiple `index`, `create`, `delete`, and `update` actions in a single request. This reduces overhead and can greatly increase indexing speed. If the Elasticsearch security features are enabled, you must have the following index privileges for the target data stream, index, or index alias: * To use the `create` action, you must have the `create_doc`, `create`, `index`, or `write` index privilege. Data streams support only the `create` action. * To use the `index` action, you must have the `create`, `index`, or `write` index privilege. * To use the `delete` action, you must have the `delete` or `write` index privilege. * To use the `update` action, you must have the `index` or `write` index privilege. * To automatically create a data stream or index with a bulk API request, you must have the `auto_configure`, `create_index`, or `manage` index privilege. * To make the result of a bulk operation visible to search using the `refresh` parameter, you must have the `maintenance` or `manage` index privilege. Automatic data stream creation requires a matching index template with data stream enabled. The actions are specified in the request body using a newline delimited JSON (NDJSON) structure: ``` action_and_meta_data\n optional_source\n action_and_meta_data\n optional_source\n .... action_and_meta_data\n optional_source\n ``` The `index` and `create` actions expect a source on the next line and have the same semantics as the `op_type` parameter in the standard index API. A `create` action fails if a document with the same ID already exists in the target An `index` action adds or replaces a document as necessary. NOTE: Data streams support only the `create` action. To update or delete a document in a data stream, you must target the backing index containing the document. An `update` action expects that the partial doc, upsert, and script and its options are specified on the next line. A `delete` action does not expect a source on the next line and has the same semantics as the standard delete API. NOTE: The final line of data must end with a newline character (`\n`). Each newline character may be preceded by a carriage return (`\r`). When sending NDJSON data to the `_bulk` endpoint, use a `Content-Type` header of `application/json` or `application/x-ndjson`. Because this format uses literal newline characters (`\n`) as delimiters, make sure that the JSON actions and sources are not pretty printed. If you provide a target in the request path, it is used for any actions that don't explicitly specify an `_index` argument. A note on the format: the idea here is to make processing as fast as possible. As some of the actions are redirected to other shards on other nodes, only `action_meta_data` is parsed on the receiving node side. 
Client libraries using this protocol should strive to do something similar on the client side, and reduce buffering as much as possible. There is no "correct" number of actions to perform in a single bulk request. Experiment with different settings to find the optimal size for your particular workload. Note that Elasticsearch limits the maximum size of an HTTP request to 100MB by default, so clients must ensure that no request exceeds this size. It is not possible to index a single document that exceeds the size limit, so you must pre-process any such documents into smaller pieces before sending them to Elasticsearch. For instance, split documents into pages or chapters before indexing them, or store raw binary data in a system outside Elasticsearch and replace the raw data with a link to the external system in the documents that you send to Elasticsearch. **Client support for bulk requests** Some of the officially supported clients provide helpers to assist with bulk requests and reindexing: * Go: Check out `esutil.BulkIndexer` * Perl: Check out `Search::Elasticsearch::Client::5_0::Bulk` and `Search::Elasticsearch::Client::5_0::Scroll` * Python: Check out `elasticsearch.helpers.*` * JavaScript: Check out `client.helpers.*` * .NET: Check out `BulkAllObservable` * PHP: Check out bulk indexing. **Submitting bulk requests with cURL** If you're providing text file input to `curl`, you must use the `--data-binary` flag instead of plain `-d`. The latter doesn't preserve newlines. For example: ``` $ cat requests { "index" : { "_index" : "test", "_id" : "1" } } { "field1" : "value1" } $ curl -s -H "Content-Type: application/x-ndjson" -XPOST localhost:9200/_bulk --data-binary "@requests"; echo {"took":7, "errors": false, "items":[{"index":{"_index":"test","_id":"1","_version":1,"result":"created","forced_refresh":false}}]} ``` **Optimistic concurrency control** Each `index` and `delete` action within a bulk API call may include the `if_seq_no` and `if_primary_term` parameters in their respective action and meta data lines. The `if_seq_no` and `if_primary_term` parameters control how operations are run, based on the last modification to existing documents. See Optimistic concurrency control for more details. **Versioning** Each bulk item can include the version value using the `version` field. It automatically follows the behavior of the index or delete operation based on the `_version` mapping. It also supports the `version_type`. **Routing** Each bulk item can include the routing value using the `routing` field. It automatically follows the behavior of the index or delete operation based on the `_routing` mapping. NOTE: Data streams do not support custom routing unless they were created with the `allow_custom_routing` setting enabled in the template. **Wait for active shards** When making bulk calls, you can set the `wait_for_active_shards` parameter to require a minimum number of shard copies to be active before starting to process the bulk request. **Refresh** Control when the changes made by this request are visible to search. NOTE: Only the shards that receive the bulk request will be affected by refresh. Imagine a `_bulk?refresh=wait_for` request with three documents in it that happen to be routed to different shards in an index with five shards. The request will only wait for those three shards to refresh. The other two shards that make up the index do not participate in the `_bulk` request at all.
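As the note above says, the JavaScript client ships bulk helpers under `client.helpers.*`; for the low-level call itself, the NDJSON pairs are passed as a flat `operations` array. A minimal sketch, run inside an async function and reusing the `client` instance from the async search example, with hypothetical index and document IDs:

```
// Each action object is followed by its source document (delete takes no source).
const result = await client.bulk({
  operations: [
    { index: { _index: 'my-index', _id: '1' } },
    { field1: 'value1' },
    { delete: { _index: 'my-index', _id: '2' } }
  ],
  refresh: 'wait_for' // make the changes searchable before the promise resolves
})

if (result.errors) {
  // items are returned in the same order the operations were submitted
  console.log(result.items.filter(item => item.index?.error != null))
}
```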
- * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-bulk | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-bulk | Elasticsearch API documentation} */ export default async function BulkApi (this: That, params: T.BulkRequest, options?: TransportRequestOptionsWithOutMeta): Promise export default async function BulkApi (this: That, params: T.BulkRequest, options?: TransportRequestOptionsWithMeta): Promise> export default async function BulkApi (this: That, params: T.BulkRequest, options?: TransportRequestOptions): Promise export default async function BulkApi (this: That, params: T.BulkRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] - const acceptedBody: string[] = ['operations'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = acceptedParams.bulk + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -58,8 +78,14 @@ export default async function BulkApi = { + capabilities: { + path: [], + body: [], + query: [] + } +} /** * Checks if the specified combination of method, API, parameters, and arbitrary capabilities are supported @@ -45,7 +42,10 @@ export default async function CapabilitiesApi (this: That, params?: T.TODO, opti export default async function CapabilitiesApi (this: That, params?: T.TODO, options?: TransportRequestOptionsWithMeta): Promise> export default async function CapabilitiesApi (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise export default async function CapabilitiesApi (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] + const { + path: acceptedPath + } = acceptedParams.capabilities + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} diff --git a/src/api/api/cat.ts b/src/api/api/cat.ts index bc397b310..94bbe6434 100644 --- a/src/api/api/cat.ts +++ b/src/api/api/cat.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. 
and contributors + * SPDX-License-Identifier: Apache-2.0 */ /* eslint-disable import/export */ @@ -35,23 +21,350 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -interface That { transport: Transport } + +interface That { + transport: Transport + acceptedParams: Record +} export default class Cat { transport: Transport + acceptedParams: Record constructor (transport: Transport) { this.transport = transport + this.acceptedParams = { + 'cat.aliases': { + path: [ + 'name' + ], + body: [], + query: [ + 'h', + 's', + 'expand_wildcards', + 'master_timeout' + ] + }, + 'cat.allocation': { + path: [ + 'node_id' + ], + body: [], + query: [ + 'bytes', + 'h', + 's', + 'local', + 'master_timeout' + ] + }, + 'cat.component_templates': { + path: [ + 'name' + ], + body: [], + query: [ + 'h', + 's', + 'local', + 'master_timeout' + ] + }, + 'cat.count': { + path: [ + 'index' + ], + body: [], + query: [ + 'h', + 's' + ] + }, + 'cat.fielddata': { + path: [ + 'fields' + ], + body: [], + query: [ + 'bytes', + 'fields', + 'h', + 's' + ] + }, + 'cat.health': { + path: [], + body: [], + query: [ + 'time', + 'ts', + 'h', + 's' + ] + }, + 'cat.help': { + path: [], + body: [], + query: [] + }, + 'cat.indices': { + path: [ + 'index' + ], + body: [], + query: [ + 'bytes', + 'expand_wildcards', + 'health', + 'include_unloaded_segments', + 'pri', + 'time', + 'master_timeout', + 'h', + 's' + ] + }, + 'cat.master': { + path: [], + body: [], + query: [ + 'h', + 's', + 'local', + 'master_timeout' + ] + }, + 'cat.ml_data_frame_analytics': { + path: [ + 'id' + ], + body: [], + query: [ + 'allow_no_match', + 'bytes', + 'h', + 's', + 'time' + ] + }, + 'cat.ml_datafeeds': { + path: [ + 'datafeed_id' + ], + body: [], + query: [ + 'allow_no_match', + 'h', + 's', + 'time' + ] + }, + 'cat.ml_jobs': { + path: [ + 'job_id' + ], + body: [], + query: [ + 'allow_no_match', + 'bytes', + 'h', + 's', + 'time' + ] + }, + 'cat.ml_trained_models': { + path: [ + 'model_id' + ], + body: [], + query: [ + 'allow_no_match', + 'bytes', + 'h', + 's', + 'from', + 'size', + 'time' + ] + }, + 'cat.nodeattrs': { + path: [], + body: [], + query: [ + 'h', + 's', + 'local', + 'master_timeout' + ] + }, + 'cat.nodes': { + path: [], + body: [], + query: [ + 'bytes', + 'full_id', + 'include_unloaded_segments', + 'h', + 's', + 'master_timeout', + 'time' + ] + }, + 'cat.pending_tasks': { + path: [], + body: [], + query: [ + 'h', + 's', + 'local', + 'master_timeout', + 'time' + ] + }, + 'cat.plugins': { + path: [], + body: [], + query: [ + 'h', + 's', + 'include_bootstrap', + 'local', + 'master_timeout' + ] + }, + 'cat.recovery': { + path: [ + 'index' + ], + body: [], + query: [ + 'active_only', + 'bytes', + 'detailed', + 'index', + 'h', + 's', + 'time' + ] + }, + 'cat.repositories': { + path: [], + body: [], + query: [ + 'h', + 's', + 'local', + 'master_timeout' + ] + }, + 'cat.segments': { + path: [ + 'index' + ], + body: [], + query: [ + 'bytes', + 'h', + 's', + 'local', + 'master_timeout' + ] + }, + 'cat.shards': { + path: [ + 'index' + ], + body: [], + query: [ + 'bytes', + 'h', + 's', + 'master_timeout', + 'time' + ] + }, + 'cat.snapshots': { + path: [ + 'repository' + ], + body: [], + query: [ + 'ignore_unavailable', + 'h', + 's', + 'master_timeout', + 'time' + ] + }, + 'cat.tasks': { + path: [], + body: [], + query: [ + 'actions', + 'detailed', + 'nodes', + 'parent_task_id', + 'h', + 's', + 'time', + 'timeout', + 'wait_for_completion' + ] + }, + 'cat.templates': { + path: [ + 'name' + ], + body: [], + query: [ + 'h', + 
's', + 'local', + 'master_timeout' + ] + }, + 'cat.thread_pool': { + path: [ + 'thread_pool_patterns' + ], + body: [], + query: [ + 'h', + 's', + 'time', + 'local', + 'master_timeout' + ] + }, + 'cat.transforms': { + path: [ + 'transform_id' + ], + body: [], + query: [ + 'allow_no_match', + 'from', + 'h', + 's', + 'time', + 'size' + ] + } + } } /** * Get aliases. Get the cluster's index aliases, including filter and routing information. This API does not return data stream aliases. IMPORTANT: CAT APIs are only intended for human consumption using the command line or the Kibana console. They are not intended for use by applications. For application consumption, use the aliases API. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-aliases | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-cat-aliases | Elasticsearch API documentation} */ async aliases (this: That, params?: T.CatAliasesRequest, options?: TransportRequestOptionsWithOutMeta): Promise async aliases (this: That, params?: T.CatAliasesRequest, options?: TransportRequestOptionsWithMeta): Promise> async aliases (this: That, params?: T.CatAliasesRequest, options?: TransportRequestOptions): Promise async aliases (this: That, params?: T.CatAliasesRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] + const { + path: acceptedPath + } = this.acceptedParams['cat.aliases'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -95,13 +408,16 @@ export default class Cat { /** * Get shard allocation information. Get a snapshot of the number of shards allocated to each data node and their disk space. IMPORTANT: CAT APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-allocation | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-cat-allocation | Elasticsearch API documentation} */ async allocation (this: That, params?: T.CatAllocationRequest, options?: TransportRequestOptionsWithOutMeta): Promise async allocation (this: That, params?: T.CatAllocationRequest, options?: TransportRequestOptionsWithMeta): Promise> async allocation (this: That, params?: T.CatAllocationRequest, options?: TransportRequestOptions): Promise async allocation (this: That, params?: T.CatAllocationRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['node_id'] + const { + path: acceptedPath + } = this.acceptedParams['cat.allocation'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -145,13 +461,16 @@ export default class Cat { /** * Get component templates. Get information about component templates in a cluster. Component templates are building blocks for constructing index templates that specify index mappings, settings, and aliases. IMPORTANT: CAT APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the get component template API. 
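All of the cat methods registered above share the same calling pattern: path parameters (such as `name` or `node_id`) and the `h`/`s` column and sort options are plain properties on the request object, and because the client requests JSON, each call resolves to an array of records rather than the text table you would see on the command line. A brief sketch with the `aliases` and `allocation` endpoints, assuming the `client` instance from the earlier examples and a hypothetical alias pattern:

```
// Only the alias and index columns, sorted by index name.
const aliases = await client.cat.aliases({
  name: 'my-alias-*',
  h: ['alias', 'index'],
  s: ['index']
})

// Disk usage per data node, reported in megabytes.
const allocation = await client.cat.allocation({ bytes: 'mb' })
console.log(aliases, allocation)
```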
- * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-component-templates | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-cat-component-templates | Elasticsearch API documentation} */ async componentTemplates (this: That, params?: T.CatComponentTemplatesRequest, options?: TransportRequestOptionsWithOutMeta): Promise async componentTemplates (this: That, params?: T.CatComponentTemplatesRequest, options?: TransportRequestOptionsWithMeta): Promise> async componentTemplates (this: That, params?: T.CatComponentTemplatesRequest, options?: TransportRequestOptions): Promise async componentTemplates (this: That, params?: T.CatComponentTemplatesRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] + const { + path: acceptedPath + } = this.acceptedParams['cat.component_templates'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -195,13 +514,16 @@ export default class Cat { /** * Get a document count. Get quick access to a document count for a data stream, an index, or an entire cluster. The document count only includes live documents, not deleted documents which have not yet been removed by the merge process. IMPORTANT: CAT APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the count API. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-count | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-cat-count | Elasticsearch API documentation} */ async count (this: That, params?: T.CatCountRequest, options?: TransportRequestOptionsWithOutMeta): Promise async count (this: That, params?: T.CatCountRequest, options?: TransportRequestOptionsWithMeta): Promise> async count (this: That, params?: T.CatCountRequest, options?: TransportRequestOptions): Promise async count (this: That, params?: T.CatCountRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] + const { + path: acceptedPath + } = this.acceptedParams['cat.count'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -245,13 +567,16 @@ export default class Cat { /** * Get field data cache information. Get the amount of heap memory currently used by the field data cache on every data node in the cluster. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the nodes stats API. 
- * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-fielddata | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-cat-fielddata | Elasticsearch API documentation} */ async fielddata (this: That, params?: T.CatFielddataRequest, options?: TransportRequestOptionsWithOutMeta): Promise async fielddata (this: That, params?: T.CatFielddataRequest, options?: TransportRequestOptionsWithMeta): Promise> async fielddata (this: That, params?: T.CatFielddataRequest, options?: TransportRequestOptions): Promise async fielddata (this: That, params?: T.CatFielddataRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['fields'] + const { + path: acceptedPath + } = this.acceptedParams['cat.fielddata'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -295,13 +620,16 @@ export default class Cat { /** * Get the cluster health status. IMPORTANT: CAT APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the cluster health API. This API is often used to check malfunctioning clusters. To help you track cluster health alongside log files and alerting systems, the API returns timestamps in two formats: `HH:MM:SS`, which is human-readable but includes no date information; `Unix epoch time`, which is machine-sortable and includes date information. The latter format is useful for cluster recoveries that take multiple days. You can use the cat health API to verify cluster health across multiple nodes. You also can use the API to track the recovery of a large cluster over a longer period of time. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-health | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-cat-health | Elasticsearch API documentation} */ async health (this: That, params?: T.CatHealthRequest, options?: TransportRequestOptionsWithOutMeta): Promise async health (this: That, params?: T.CatHealthRequest, options?: TransportRequestOptionsWithMeta): Promise> async health (this: That, params?: T.CatHealthRequest, options?: TransportRequestOptions): Promise async health (this: That, params?: T.CatHealthRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] + const { + path: acceptedPath + } = this.acceptedParams['cat.health'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -335,13 +663,16 @@ export default class Cat { /** * Get CAT help. Get help for the CAT APIs. 
- * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-cat | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/group/endpoint-cat | Elasticsearch API documentation} */ async help (this: That, params?: T.CatHelpRequest, options?: TransportRequestOptionsWithOutMeta): Promise async help (this: That, params?: T.CatHelpRequest, options?: TransportRequestOptionsWithMeta): Promise> async help (this: That, params?: T.CatHelpRequest, options?: TransportRequestOptions): Promise async help (this: That, params?: T.CatHelpRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] + const { + path: acceptedPath + } = this.acceptedParams['cat.help'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -375,13 +706,16 @@ export default class Cat { /** * Get index information. Get high-level information about indices in a cluster, including backing indices for data streams. Use this request to get the following information for each index in a cluster: - shard count - document count - deleted document count - primary store size - total store size of all shards, including shard replicas These metrics are retrieved directly from Lucene, which Elasticsearch uses internally to power indexing and search. As a result, all document counts include hidden nested documents. To get an accurate count of Elasticsearch documents, use the cat count or count APIs. CAT APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use an index endpoint. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-indices | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-cat-indices | Elasticsearch API documentation} */ async indices (this: That, params?: T.CatIndicesRequest, options?: TransportRequestOptionsWithOutMeta): Promise async indices (this: That, params?: T.CatIndicesRequest, options?: TransportRequestOptionsWithMeta): Promise> async indices (this: That, params?: T.CatIndicesRequest, options?: TransportRequestOptions): Promise async indices (this: That, params?: T.CatIndicesRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] + const { + path: acceptedPath + } = this.acceptedParams['cat.indices'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -425,13 +759,16 @@ export default class Cat { /** * Get master node information. Get information about the master node, including the ID, bound IP address, and name. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the nodes info API. 
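For the index listing described above, the request properties map one-to-one onto the `cat.indices` query list declared in the constructor. A small sketch, with a hypothetical index pattern:

```
// Unhealthy indices only, largest store first.
const indices = await client.cat.indices({
  index: 'logs-*',
  health: 'yellow',
  bytes: 'gb',
  s: ['store.size:desc'],
  expand_wildcards: 'open'
})
console.log(indices)
```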
- * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-master | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-cat-master | Elasticsearch API documentation} */ async master (this: That, params?: T.CatMasterRequest, options?: TransportRequestOptionsWithOutMeta): Promise async master (this: That, params?: T.CatMasterRequest, options?: TransportRequestOptionsWithMeta): Promise> async master (this: That, params?: T.CatMasterRequest, options?: TransportRequestOptions): Promise async master (this: That, params?: T.CatMasterRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] + const { + path: acceptedPath + } = this.acceptedParams['cat.master'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -465,13 +802,16 @@ export default class Cat { /** * Get data frame analytics jobs. Get configuration and usage information about data frame analytics jobs. IMPORTANT: CAT APIs are only intended for human consumption using the Kibana console or command line. They are not intended for use by applications. For application consumption, use the get data frame analytics jobs statistics API. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-ml-data-frame-analytics | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-cat-ml-data-frame-analytics | Elasticsearch API documentation} */ async mlDataFrameAnalytics (this: That, params?: T.CatMlDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async mlDataFrameAnalytics (this: That, params?: T.CatMlDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithMeta): Promise> async mlDataFrameAnalytics (this: That, params?: T.CatMlDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise async mlDataFrameAnalytics (this: That, params?: T.CatMlDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id'] + const { + path: acceptedPath + } = this.acceptedParams['cat.ml_data_frame_analytics'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -515,13 +855,16 @@ export default class Cat { /** * Get datafeeds. Get configuration and usage information about datafeeds. This API returns a maximum of 10,000 datafeeds. If the Elasticsearch security features are enabled, you must have `monitor_ml`, `monitor`, `manage_ml`, or `manage` cluster privileges to use this API. IMPORTANT: CAT APIs are only intended for human consumption using the Kibana console or command line. They are not intended for use by applications. For application consumption, use the get datafeed statistics API. 
- * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-ml-datafeeds | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-cat-ml-datafeeds | Elasticsearch API documentation} */ async mlDatafeeds (this: That, params?: T.CatMlDatafeedsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async mlDatafeeds (this: That, params?: T.CatMlDatafeedsRequest, options?: TransportRequestOptionsWithMeta): Promise> async mlDatafeeds (this: That, params?: T.CatMlDatafeedsRequest, options?: TransportRequestOptions): Promise async mlDatafeeds (this: That, params?: T.CatMlDatafeedsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['datafeed_id'] + const { + path: acceptedPath + } = this.acceptedParams['cat.ml_datafeeds'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -565,13 +908,16 @@ export default class Cat { /** * Get anomaly detection jobs. Get configuration and usage information for anomaly detection jobs. This API returns a maximum of 10,000 jobs. If the Elasticsearch security features are enabled, you must have `monitor_ml`, `monitor`, `manage_ml`, or `manage` cluster privileges to use this API. IMPORTANT: CAT APIs are only intended for human consumption using the Kibana console or command line. They are not intended for use by applications. For application consumption, use the get anomaly detection job statistics API. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-ml-jobs | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-cat-ml-jobs | Elasticsearch API documentation} */ async mlJobs (this: That, params?: T.CatMlJobsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async mlJobs (this: That, params?: T.CatMlJobsRequest, options?: TransportRequestOptionsWithMeta): Promise> async mlJobs (this: That, params?: T.CatMlJobsRequest, options?: TransportRequestOptions): Promise async mlJobs (this: That, params?: T.CatMlJobsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['job_id'] + const { + path: acceptedPath + } = this.acceptedParams['cat.ml_jobs'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -615,13 +961,16 @@ export default class Cat { /** * Get trained models. Get configuration and usage information about inference trained models. IMPORTANT: CAT APIs are only intended for human consumption using the Kibana console or command line. They are not intended for use by applications. For application consumption, use the get trained models statistics API. 
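The machine learning cat endpoints above (`ml_datafeeds`, `ml_jobs`, `ml_trained_models`) follow the same shape; `allow_no_match` controls whether a non-matching wildcard expression is treated as an error. A sketch with illustrative column names (the `h` values are assumptions, not an exhaustive list):

```
// Anomaly detection jobs with their state and processed record counts.
const jobs = await client.cat.mlJobs({
  allow_no_match: true,
  h: ['id', 'state', 'data.processed_records']
})

// First page of trained models.
const models = await client.cat.mlTrainedModels({ from: 0, size: 100, h: ['id', 'heap_size'] })
console.log(jobs, models)
```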
- * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-ml-trained-models | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-cat-ml-trained-models | Elasticsearch API documentation} */ async mlTrainedModels (this: That, params?: T.CatMlTrainedModelsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async mlTrainedModels (this: That, params?: T.CatMlTrainedModelsRequest, options?: TransportRequestOptionsWithMeta): Promise> async mlTrainedModels (this: That, params?: T.CatMlTrainedModelsRequest, options?: TransportRequestOptions): Promise async mlTrainedModels (this: That, params?: T.CatMlTrainedModelsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['model_id'] + const { + path: acceptedPath + } = this.acceptedParams['cat.ml_trained_models'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -665,13 +1014,16 @@ export default class Cat { /** * Get node attribute information. Get information about custom node attributes. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the nodes info API. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-nodeattrs | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-cat-nodeattrs | Elasticsearch API documentation} */ async nodeattrs (this: That, params?: T.CatNodeattrsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async nodeattrs (this: That, params?: T.CatNodeattrsRequest, options?: TransportRequestOptionsWithMeta): Promise> async nodeattrs (this: That, params?: T.CatNodeattrsRequest, options?: TransportRequestOptions): Promise async nodeattrs (this: That, params?: T.CatNodeattrsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] + const { + path: acceptedPath + } = this.acceptedParams['cat.nodeattrs'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -705,13 +1057,16 @@ export default class Cat { /** * Get node information. Get information about the nodes in a cluster. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the nodes info API. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-nodes | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-cat-nodes | Elasticsearch API documentation} */ async nodes (this: That, params?: T.CatNodesRequest, options?: TransportRequestOptionsWithOutMeta): Promise async nodes (this: That, params?: T.CatNodesRequest, options?: TransportRequestOptionsWithMeta): Promise> async nodes (this: That, params?: T.CatNodesRequest, options?: TransportRequestOptions): Promise async nodes (this: That, params?: T.CatNodesRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] + const { + path: acceptedPath + } = this.acceptedParams['cat.nodes'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -745,13 +1100,16 @@ export default class Cat { /** * Get pending task information. Get information about cluster-level changes that have not yet taken effect. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the pending cluster tasks API. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-pending-tasks | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-cat-pending-tasks | Elasticsearch API documentation} */ async pendingTasks (this: That, params?: T.CatPendingTasksRequest, options?: TransportRequestOptionsWithOutMeta): Promise async pendingTasks (this: That, params?: T.CatPendingTasksRequest, options?: TransportRequestOptionsWithMeta): Promise> async pendingTasks (this: That, params?: T.CatPendingTasksRequest, options?: TransportRequestOptions): Promise async pendingTasks (this: That, params?: T.CatPendingTasksRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] + const { + path: acceptedPath + } = this.acceptedParams['cat.pending_tasks'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -785,13 +1143,16 @@ export default class Cat { /** * Get plugin information. Get a list of plugins running on each node of a cluster. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the nodes info API. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-plugins | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-cat-plugins | Elasticsearch API documentation} */ async plugins (this: That, params?: T.CatPluginsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async plugins (this: That, params?: T.CatPluginsRequest, options?: TransportRequestOptionsWithMeta): Promise> async plugins (this: That, params?: T.CatPluginsRequest, options?: TransportRequestOptions): Promise async plugins (this: That, params?: T.CatPluginsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] + const { + path: acceptedPath + } = this.acceptedParams['cat.plugins'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -825,13 +1186,16 @@ export default class Cat { /** * Get shard recovery information. Get information about ongoing and completed shard recoveries. Shard recovery is the process of initializing a shard copy, such as restoring a primary shard from a snapshot or syncing a replica shard from a primary shard. When a shard recovery completes, the recovered shard is available for search and indexing. For data streams, the API returns information about the stream’s backing indices. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the index recovery API. 
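For the shard recovery listing described above, `active_only` limits the output to recoveries still in flight and `detailed` adds per-file information. A minimal sketch:

```
const recoveries = await client.cat.recovery({
  index: 'my-index', // hypothetical index; omit to cover the whole cluster
  active_only: true,
  detailed: true,
  bytes: 'mb'
})
console.log(recoveries)
```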
- * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-recovery | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-cat-recovery | Elasticsearch API documentation} */ async recovery (this: That, params?: T.CatRecoveryRequest, options?: TransportRequestOptionsWithOutMeta): Promise async recovery (this: That, params?: T.CatRecoveryRequest, options?: TransportRequestOptionsWithMeta): Promise> async recovery (this: That, params?: T.CatRecoveryRequest, options?: TransportRequestOptions): Promise async recovery (this: That, params?: T.CatRecoveryRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] + const { + path: acceptedPath + } = this.acceptedParams['cat.recovery'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -875,13 +1239,16 @@ export default class Cat { /** * Get snapshot repository information. Get a list of snapshot repositories for a cluster. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the get snapshot repository API. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-repositories | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-cat-repositories | Elasticsearch API documentation} */ async repositories (this: That, params?: T.CatRepositoriesRequest, options?: TransportRequestOptionsWithOutMeta): Promise async repositories (this: That, params?: T.CatRepositoriesRequest, options?: TransportRequestOptionsWithMeta): Promise> async repositories (this: That, params?: T.CatRepositoriesRequest, options?: TransportRequestOptions): Promise async repositories (this: That, params?: T.CatRepositoriesRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] + const { + path: acceptedPath + } = this.acceptedParams['cat.repositories'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -915,13 +1282,16 @@ export default class Cat { /** * Get segment information. Get low-level information about the Lucene segments in index shards. For data streams, the API returns information about the backing indices. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the index segments API. 
- * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-segments | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-cat-segments | Elasticsearch API documentation} */ async segments (this: That, params?: T.CatSegmentsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async segments (this: That, params?: T.CatSegmentsRequest, options?: TransportRequestOptionsWithMeta): Promise> async segments (this: That, params?: T.CatSegmentsRequest, options?: TransportRequestOptions): Promise async segments (this: That, params?: T.CatSegmentsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] + const { + path: acceptedPath + } = this.acceptedParams['cat.segments'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -965,13 +1335,16 @@ export default class Cat { /** * Get shard information. Get information about the shards in a cluster. For data streams, the API returns information about the backing indices. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-shards | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-cat-shards | Elasticsearch API documentation} */ async shards (this: That, params?: T.CatShardsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async shards (this: That, params?: T.CatShardsRequest, options?: TransportRequestOptionsWithMeta): Promise> async shards (this: That, params?: T.CatShardsRequest, options?: TransportRequestOptions): Promise async shards (this: That, params?: T.CatShardsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] + const { + path: acceptedPath + } = this.acceptedParams['cat.shards'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1015,13 +1388,16 @@ export default class Cat { /** * Get snapshot information. Get information about the snapshots stored in one or more repositories. A snapshot is a backup of an index or running Elasticsearch cluster. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the get snapshot API. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-snapshots | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-cat-snapshots | Elasticsearch API documentation} */ async snapshots (this: That, params?: T.CatSnapshotsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async snapshots (this: That, params?: T.CatSnapshotsRequest, options?: TransportRequestOptionsWithMeta): Promise> async snapshots (this: That, params?: T.CatSnapshotsRequest, options?: TransportRequestOptions): Promise async snapshots (this: That, params?: T.CatSnapshotsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['repository'] + const { + path: acceptedPath + } = this.acceptedParams['cat.snapshots'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -1065,13 +1441,16 @@ export default class Cat { /** * Get task information. Get information about tasks currently running in the cluster. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the task management API. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-tasks | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-cat-tasks | Elasticsearch API documentation} */ async tasks (this: That, params?: T.CatTasksRequest, options?: TransportRequestOptionsWithOutMeta): Promise async tasks (this: That, params?: T.CatTasksRequest, options?: TransportRequestOptionsWithMeta): Promise> async tasks (this: That, params?: T.CatTasksRequest, options?: TransportRequestOptions): Promise async tasks (this: That, params?: T.CatTasksRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] + const { + path: acceptedPath + } = this.acceptedParams['cat.tasks'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1105,13 +1484,16 @@ export default class Cat { /** * Get index template information. Get information about the index templates in a cluster. You can use index templates to apply index settings and field mappings to new indices at creation. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the get index template API. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-templates | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-cat-templates | Elasticsearch API documentation} */ async templates (this: That, params?: T.CatTemplatesRequest, options?: TransportRequestOptionsWithOutMeta): Promise async templates (this: That, params?: T.CatTemplatesRequest, options?: TransportRequestOptionsWithMeta): Promise> async templates (this: That, params?: T.CatTemplatesRequest, options?: TransportRequestOptions): Promise async templates (this: That, params?: T.CatTemplatesRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] + const { + path: acceptedPath + } = this.acceptedParams['cat.templates'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1155,13 +1537,16 @@ export default class Cat { /** * Get thread pool statistics. Get thread pool statistics for each node in a cluster. Returned information includes all built-in thread pools and custom thread pools. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the nodes info API. 
- * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-thread-pool | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-cat-thread-pool | Elasticsearch API documentation} */ async threadPool (this: That, params?: T.CatThreadPoolRequest, options?: TransportRequestOptionsWithOutMeta): Promise async threadPool (this: That, params?: T.CatThreadPoolRequest, options?: TransportRequestOptionsWithMeta): Promise> async threadPool (this: That, params?: T.CatThreadPoolRequest, options?: TransportRequestOptions): Promise async threadPool (this: That, params?: T.CatThreadPoolRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['thread_pool_patterns'] + const { + path: acceptedPath + } = this.acceptedParams['cat.thread_pool'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1205,13 +1590,16 @@ export default class Cat { /** * Get transform information. Get configuration and usage information about transforms. CAT APIs are only intended for human consumption using the Kibana console or command line. They are not intended for use by applications. For application consumption, use the get transform statistics API. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cat-transforms | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-cat-transforms | Elasticsearch API documentation} */ async transforms (this: That, params?: T.CatTransformsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async transforms (this: That, params?: T.CatTransformsRequest, options?: TransportRequestOptionsWithMeta): Promise> async transforms (this: That, params?: T.CatTransformsRequest, options?: TransportRequestOptions): Promise async transforms (this: That, params?: T.CatTransformsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['transform_id'] + const { + path: acceptedPath + } = this.acceptedParams['cat.transforms'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} diff --git a/src/api/api/ccr.ts b/src/api/api/ccr.ts index 29455527c..778fba020 100644 --- a/src/api/api/ccr.ts +++ b/src/api/api/ccr.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. 
and contributors + * SPDX-License-Identifier: Apache-2.0 */ /* eslint-disable import/export */ @@ -35,23 +21,199 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -interface That { transport: Transport } + +interface That { + transport: Transport + acceptedParams: Record +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] export default class Ccr { transport: Transport + acceptedParams: Record constructor (transport: Transport) { this.transport = transport + this.acceptedParams = { + 'ccr.delete_auto_follow_pattern': { + path: [ + 'name' + ], + body: [], + query: [ + 'master_timeout' + ] + }, + 'ccr.follow': { + path: [ + 'index' + ], + body: [ + 'data_stream_name', + 'leader_index', + 'max_outstanding_read_requests', + 'max_outstanding_write_requests', + 'max_read_request_operation_count', + 'max_read_request_size', + 'max_retry_delay', + 'max_write_buffer_count', + 'max_write_buffer_size', + 'max_write_request_operation_count', + 'max_write_request_size', + 'read_poll_timeout', + 'remote_cluster', + 'settings' + ], + query: [ + 'master_timeout', + 'wait_for_active_shards' + ] + }, + 'ccr.follow_info': { + path: [ + 'index' + ], + body: [], + query: [ + 'master_timeout' + ] + }, + 'ccr.follow_stats': { + path: [ + 'index' + ], + body: [], + query: [ + 'timeout' + ] + }, + 'ccr.forget_follower': { + path: [ + 'index' + ], + body: [ + 'follower_cluster', + 'follower_index', + 'follower_index_uuid', + 'leader_remote_cluster' + ], + query: [ + 'timeout' + ] + }, + 'ccr.get_auto_follow_pattern': { + path: [ + 'name' + ], + body: [], + query: [ + 'master_timeout' + ] + }, + 'ccr.pause_auto_follow_pattern': { + path: [ + 'name' + ], + body: [], + query: [ + 'master_timeout' + ] + }, + 'ccr.pause_follow': { + path: [ + 'index' + ], + body: [], + query: [ + 'master_timeout' + ] + }, + 'ccr.put_auto_follow_pattern': { + path: [ + 'name' + ], + body: [ + 'remote_cluster', + 'follow_index_pattern', + 'leader_index_patterns', + 'leader_index_exclusion_patterns', + 'max_outstanding_read_requests', + 'settings', + 'max_outstanding_write_requests', + 'read_poll_timeout', + 'max_read_request_operation_count', + 'max_read_request_size', + 'max_retry_delay', + 'max_write_buffer_count', + 'max_write_buffer_size', + 'max_write_request_operation_count', + 'max_write_request_size' + ], + query: [ + 'master_timeout' + ] + }, + 'ccr.resume_auto_follow_pattern': { + path: [ + 'name' + ], + body: [], + query: [ + 'master_timeout' + ] + }, + 'ccr.resume_follow': { + path: [ + 'index' + ], + body: [ + 'max_outstanding_read_requests', + 'max_outstanding_write_requests', + 'max_read_request_operation_count', + 'max_read_request_size', + 'max_retry_delay', + 'max_write_buffer_count', + 'max_write_buffer_size', + 'max_write_request_operation_count', + 'max_write_request_size', + 'read_poll_timeout' + ], + query: [ + 'master_timeout' + ] + }, + 'ccr.stats': { + path: [], + body: [], + query: [ + 'master_timeout', + 'timeout' + ] + }, + 'ccr.unfollow': { + path: [ + 'index' + ], + body: [], + query: [ + 'master_timeout' + ] + } + } } /** * Delete auto-follow patterns. Delete a collection of cross-cluster replication auto-follow patterns. 
- * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-delete-auto-follow-pattern | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-ccr-delete-auto-follow-pattern | Elasticsearch API documentation} */ async deleteAutoFollowPattern (this: That, params: T.CcrDeleteAutoFollowPatternRequest, options?: TransportRequestOptionsWithOutMeta): Promise async deleteAutoFollowPattern (this: That, params: T.CcrDeleteAutoFollowPatternRequest, options?: TransportRequestOptionsWithMeta): Promise> async deleteAutoFollowPattern (this: That, params: T.CcrDeleteAutoFollowPatternRequest, options?: TransportRequestOptions): Promise async deleteAutoFollowPattern (this: That, params: T.CcrDeleteAutoFollowPatternRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] + const { + path: acceptedPath + } = this.acceptedParams['ccr.delete_auto_follow_pattern'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -87,14 +249,18 @@ export default class Ccr { /** * Create a follower. Create a cross-cluster replication follower index that follows a specific leader index. When the API returns, the follower index exists and cross-cluster replication starts replicating operations from the leader index to the follower index. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-follow | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-ccr-follow | Elasticsearch API documentation} */ async follow (this: That, params: T.CcrFollowRequest, options?: TransportRequestOptionsWithOutMeta): Promise async follow (this: That, params: T.CcrFollowRequest, options?: TransportRequestOptionsWithMeta): Promise> async follow (this: That, params: T.CcrFollowRequest, options?: TransportRequestOptions): Promise async follow (this: That, params: T.CcrFollowRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] - const acceptedBody: string[] = ['data_stream_name', 'leader_index', 'max_outstanding_read_requests', 'max_outstanding_write_requests', 'max_read_request_operation_count', 'max_read_request_size', 'max_retry_delay', 'max_write_buffer_count', 'max_write_buffer_size', 'max_write_request_operation_count', 'max_write_request_size', 'read_poll_timeout', 'remote_cluster', 'settings'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['ccr.follow'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -116,8 +282,14 @@ export default class Ccr { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -134,13 +306,16 @@ export default class Ccr { /** * Get follower information. Get information about all cross-cluster replication follower indices. For example, the results include follower index names, leader index names, replication options, and whether the follower indices are active or paused. 
- * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-follow-info | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-ccr-follow-info | Elasticsearch API documentation} */ async followInfo (this: That, params: T.CcrFollowInfoRequest, options?: TransportRequestOptionsWithOutMeta): Promise async followInfo (this: That, params: T.CcrFollowInfoRequest, options?: TransportRequestOptionsWithMeta): Promise> async followInfo (this: That, params: T.CcrFollowInfoRequest, options?: TransportRequestOptions): Promise async followInfo (this: That, params: T.CcrFollowInfoRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] + const { + path: acceptedPath + } = this.acceptedParams['ccr.follow_info'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -176,13 +351,16 @@ export default class Ccr { /** * Get follower stats. Get cross-cluster replication follower stats. The API returns shard-level stats about the "following tasks" associated with each shard for the specified indices. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-follow-stats | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-ccr-follow-stats | Elasticsearch API documentation} */ async followStats (this: That, params: T.CcrFollowStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async followStats (this: That, params: T.CcrFollowStatsRequest, options?: TransportRequestOptionsWithMeta): Promise> async followStats (this: That, params: T.CcrFollowStatsRequest, options?: TransportRequestOptions): Promise async followStats (this: That, params: T.CcrFollowStatsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] + const { + path: acceptedPath + } = this.acceptedParams['ccr.follow_stats'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -218,14 +396,18 @@ export default class Ccr { /** * Forget a follower. Remove the cross-cluster replication follower retention leases from the leader. A following index takes out retention leases on its leader index. These leases are used to increase the likelihood that the shards of the leader index retain the history of operations that the shards of the following index need to run replication. When a follower index is converted to a regular index by the unfollow API (either by directly calling the API or by index lifecycle management tasks), these leases are removed. However, removal of the leases can fail, for example when the remote cluster containing the leader index is unavailable. While the leases will eventually expire on their own, their extended existence can cause the leader index to hold more history than necessary and prevent index lifecycle management from performing some operations on the leader index. This API exists to enable manually removing the leases when the unfollow API is unable to do so. NOTE: This API does not stop replication by a following index. If you use this API with a follower index that is still actively following, the following index will add back retention leases on the leader. The only purpose of this API is to handle the case of failure to remove the following retention leases after the unfollow API is invoked. 
- * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-forget-follower | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-ccr-forget-follower | Elasticsearch API documentation} */ async forgetFollower (this: That, params: T.CcrForgetFollowerRequest, options?: TransportRequestOptionsWithOutMeta): Promise async forgetFollower (this: That, params: T.CcrForgetFollowerRequest, options?: TransportRequestOptionsWithMeta): Promise> async forgetFollower (this: That, params: T.CcrForgetFollowerRequest, options?: TransportRequestOptions): Promise async forgetFollower (this: That, params: T.CcrForgetFollowerRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] - const acceptedBody: string[] = ['follower_cluster', 'follower_index', 'follower_index_uuid', 'leader_remote_cluster'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['ccr.forget_follower'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -247,8 +429,14 @@ export default class Ccr { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -265,13 +453,16 @@ export default class Ccr { /** * Get auto-follow patterns. Get cross-cluster replication auto-follow patterns. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-get-auto-follow-pattern-1 | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-ccr-get-auto-follow-pattern-1 | Elasticsearch API documentation} */ async getAutoFollowPattern (this: That, params?: T.CcrGetAutoFollowPatternRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getAutoFollowPattern (this: That, params?: T.CcrGetAutoFollowPatternRequest, options?: TransportRequestOptionsWithMeta): Promise> async getAutoFollowPattern (this: That, params?: T.CcrGetAutoFollowPatternRequest, options?: TransportRequestOptions): Promise async getAutoFollowPattern (this: That, params?: T.CcrGetAutoFollowPatternRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] + const { + path: acceptedPath + } = this.acceptedParams['ccr.get_auto_follow_pattern'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -315,13 +506,16 @@ export default class Ccr { /** * Pause an auto-follow pattern. Pause a cross-cluster replication auto-follow pattern. When the API returns, the auto-follow pattern is inactive. New indices that are created on the remote cluster and match the auto-follow patterns are ignored. You can resume auto-following with the resume auto-follow pattern API. When it resumes, the auto-follow pattern is active again and automatically configures follower indices for newly created indices on the remote cluster that match its patterns. Remote indices that were created while the pattern was paused will also be followed, unless they have been deleted or closed in the interim. 
- * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-pause-auto-follow-pattern | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-ccr-pause-auto-follow-pattern | Elasticsearch API documentation} */ async pauseAutoFollowPattern (this: That, params: T.CcrPauseAutoFollowPatternRequest, options?: TransportRequestOptionsWithOutMeta): Promise async pauseAutoFollowPattern (this: That, params: T.CcrPauseAutoFollowPatternRequest, options?: TransportRequestOptionsWithMeta): Promise> async pauseAutoFollowPattern (this: That, params: T.CcrPauseAutoFollowPatternRequest, options?: TransportRequestOptions): Promise async pauseAutoFollowPattern (this: That, params: T.CcrPauseAutoFollowPatternRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] + const { + path: acceptedPath + } = this.acceptedParams['ccr.pause_auto_follow_pattern'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -357,13 +551,16 @@ export default class Ccr { /** * Pause a follower. Pause a cross-cluster replication follower index. The follower index will not fetch any additional operations from the leader index. You can resume following with the resume follower API. You can pause and resume a follower index to change the configuration of the following task. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-pause-follow | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-ccr-pause-follow | Elasticsearch API documentation} */ async pauseFollow (this: That, params: T.CcrPauseFollowRequest, options?: TransportRequestOptionsWithOutMeta): Promise async pauseFollow (this: That, params: T.CcrPauseFollowRequest, options?: TransportRequestOptionsWithMeta): Promise> async pauseFollow (this: That, params: T.CcrPauseFollowRequest, options?: TransportRequestOptions): Promise async pauseFollow (this: That, params: T.CcrPauseFollowRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] + const { + path: acceptedPath + } = this.acceptedParams['ccr.pause_follow'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -399,14 +596,18 @@ export default class Ccr { /** * Create or update auto-follow patterns. Create a collection of cross-cluster replication auto-follow patterns for a remote cluster. Newly created indices on the remote cluster that match any of the patterns are automatically configured as follower indices. Indices on the remote cluster that were created before the auto-follow pattern was created will not be auto-followed even if they match the pattern. This API can also be used to update auto-follow patterns. NOTE: Follower indices that were configured automatically before updating an auto-follow pattern will remain unchanged even if they do not match against the new patterns. 
- * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-put-auto-follow-pattern | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-ccr-put-auto-follow-pattern | Elasticsearch API documentation} */ async putAutoFollowPattern (this: That, params: T.CcrPutAutoFollowPatternRequest, options?: TransportRequestOptionsWithOutMeta): Promise async putAutoFollowPattern (this: That, params: T.CcrPutAutoFollowPatternRequest, options?: TransportRequestOptionsWithMeta): Promise> async putAutoFollowPattern (this: That, params: T.CcrPutAutoFollowPatternRequest, options?: TransportRequestOptions): Promise async putAutoFollowPattern (this: That, params: T.CcrPutAutoFollowPatternRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] - const acceptedBody: string[] = ['remote_cluster', 'follow_index_pattern', 'leader_index_patterns', 'leader_index_exclusion_patterns', 'max_outstanding_read_requests', 'settings', 'max_outstanding_write_requests', 'read_poll_timeout', 'max_read_request_operation_count', 'max_read_request_size', 'max_retry_delay', 'max_write_buffer_count', 'max_write_buffer_size', 'max_write_request_operation_count', 'max_write_request_size'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['ccr.put_auto_follow_pattern'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -428,8 +629,14 @@ export default class Ccr { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -446,13 +653,16 @@ export default class Ccr { /** * Resume an auto-follow pattern. Resume a cross-cluster replication auto-follow pattern that was paused. The auto-follow pattern will resume configuring following indices for newly created indices that match its patterns on the remote cluster. Remote indices created while the pattern was paused will also be followed unless they have been deleted or closed in the interim. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-resume-auto-follow-pattern | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-ccr-resume-auto-follow-pattern | Elasticsearch API documentation} */ async resumeAutoFollowPattern (this: That, params: T.CcrResumeAutoFollowPatternRequest, options?: TransportRequestOptionsWithOutMeta): Promise async resumeAutoFollowPattern (this: That, params: T.CcrResumeAutoFollowPatternRequest, options?: TransportRequestOptionsWithMeta): Promise> async resumeAutoFollowPattern (this: That, params: T.CcrResumeAutoFollowPatternRequest, options?: TransportRequestOptions): Promise async resumeAutoFollowPattern (this: That, params: T.CcrResumeAutoFollowPatternRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] + const { + path: acceptedPath + } = this.acceptedParams['ccr.resume_auto_follow_pattern'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -488,14 +698,18 @@ export default class Ccr { /** * Resume a follower. Resume a cross-cluster replication follower index that was paused. The follower index could have been paused with the pause follower API. Alternatively it could be paused due to replication that cannot be retried due to failures during following tasks. When this API returns, the follower index will resume fetching operations from the leader index. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-resume-follow | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-ccr-resume-follow | Elasticsearch API documentation} */ async resumeFollow (this: That, params: T.CcrResumeFollowRequest, options?: TransportRequestOptionsWithOutMeta): Promise async resumeFollow (this: That, params: T.CcrResumeFollowRequest, options?: TransportRequestOptionsWithMeta): Promise> async resumeFollow (this: That, params: T.CcrResumeFollowRequest, options?: TransportRequestOptions): Promise async resumeFollow (this: That, params: T.CcrResumeFollowRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] - const acceptedBody: string[] = ['max_outstanding_read_requests', 'max_outstanding_write_requests', 'max_read_request_operation_count', 'max_read_request_size', 'max_retry_delay', 'max_write_buffer_count', 'max_write_buffer_size', 'max_write_request_operation_count', 'max_write_request_size', 'read_poll_timeout'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['ccr.resume_follow'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -517,8 +731,14 @@ export default class Ccr { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -535,13 +755,16 @@ export default class Ccr { /** * Get cross-cluster replication stats. This API returns stats about auto-following and the same shard-level stats as the get follower stats API. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-stats | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-ccr-stats | Elasticsearch API documentation} */ async stats (this: That, params?: T.CcrStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async stats (this: That, params?: T.CcrStatsRequest, options?: TransportRequestOptionsWithMeta): Promise> async stats (this: That, params?: T.CcrStatsRequest, options?: TransportRequestOptions): Promise async stats (this: That, params?: T.CcrStatsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] + const { + path: acceptedPath + } = this.acceptedParams['ccr.stats'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -575,13 +798,16 @@ export default class Ccr { /** * Unfollow an index. Convert a cross-cluster replication follower index to a regular index. 
The API stops the following task associated with a follower index and removes index metadata and settings associated with cross-cluster replication. The follower index must be paused and closed before you call the unfollow API. > info > Currently cross-cluster replication does not support converting an existing regular index to a follower index. Converting a follower index to a regular index is an irreversible operation. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ccr-unfollow | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-ccr-unfollow | Elasticsearch API documentation} */ async unfollow (this: That, params: T.CcrUnfollowRequest, options?: TransportRequestOptionsWithOutMeta): Promise async unfollow (this: That, params: T.CcrUnfollowRequest, options?: TransportRequestOptionsWithMeta): Promise> async unfollow (this: That, params: T.CcrUnfollowRequest, options?: TransportRequestOptions): Promise async unfollow (this: That, params: T.CcrUnfollowRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] + const { + path: acceptedPath + } = this.acceptedParams['ccr.unfollow'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} diff --git a/src/api/api/clear_scroll.ts b/src/api/api/clear_scroll.ts index 7b7258503..da942a980 100644 --- a/src/api/api/clear_scroll.ts +++ b/src/api/api/clear_scroll.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 */ /* eslint-disable import/export */ @@ -35,18 +21,37 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -interface That { transport: Transport } + +interface That { + transport: Transport +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] + +const acceptedParams: Record = { + clear_scroll: { + path: [], + body: [ + 'scroll_id' + ], + query: [] + } +} /** * Clear a scrolling search. Clear the search context and results for a scrolling search. 
- * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-clear-scroll | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-clear-scroll | Elasticsearch API documentation} */ export default async function ClearScrollApi (this: That, params?: T.ClearScrollRequest, options?: TransportRequestOptionsWithOutMeta): Promise export default async function ClearScrollApi (this: That, params?: T.ClearScrollRequest, options?: TransportRequestOptionsWithMeta): Promise> export default async function ClearScrollApi (this: That, params?: T.ClearScrollRequest, options?: TransportRequestOptions): Promise export default async function ClearScrollApi (this: That, params?: T.ClearScrollRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const acceptedBody: string[] = ['scroll_id'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = acceptedParams.clear_scroll + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -69,8 +74,14 @@ export default async function ClearScrollApi (this: That, params?: T.ClearScroll } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } diff --git a/src/api/api/close_point_in_time.ts b/src/api/api/close_point_in_time.ts index 26d5b0e26..b7dc4a7dd 100644 --- a/src/api/api/close_point_in_time.ts +++ b/src/api/api/close_point_in_time.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 */ /* eslint-disable import/export */ @@ -35,18 +21,37 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -interface That { transport: Transport } + +interface That { + transport: Transport +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] + +const acceptedParams: Record = { + close_point_in_time: { + path: [], + body: [ + 'id' + ], + query: [] + } +} /** * Close a point in time. A point in time must be opened explicitly before being used in search requests. The `keep_alive` parameter tells Elasticsearch how long it should persist. A point in time is automatically closed when the `keep_alive` period has elapsed. However, keeping points in time has a cost; close them as soon as they are no longer required for search requests. 
- * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-open-point-in-time | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-open-point-in-time | Elasticsearch API documentation} */ export default async function ClosePointInTimeApi (this: That, params: T.ClosePointInTimeRequest, options?: TransportRequestOptionsWithOutMeta): Promise export default async function ClosePointInTimeApi (this: That, params: T.ClosePointInTimeRequest, options?: TransportRequestOptionsWithMeta): Promise> export default async function ClosePointInTimeApi (this: That, params: T.ClosePointInTimeRequest, options?: TransportRequestOptions): Promise export default async function ClosePointInTimeApi (this: That, params: T.ClosePointInTimeRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const acceptedBody: string[] = ['id'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = acceptedParams.close_point_in_time + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -68,8 +73,14 @@ export default async function ClosePointInTimeApi (this: That, params: T.ClosePo } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } diff --git a/src/api/api/cluster.ts b/src/api/api/cluster.ts index 730c942d2..2ce1d1eca 100644 --- a/src/api/api/cluster.ts +++ b/src/api/api/cluster.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. 
and contributors + * SPDX-License-Identifier: Apache-2.0 */ /* eslint-disable import/export */ @@ -35,24 +21,218 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -interface That { transport: Transport } + +interface That { + transport: Transport + acceptedParams: Record +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] export default class Cluster { transport: Transport + acceptedParams: Record constructor (transport: Transport) { this.transport = transport + this.acceptedParams = { + 'cluster.allocation_explain': { + path: [], + body: [ + 'current_node', + 'index', + 'primary', + 'shard' + ], + query: [ + 'include_disk_info', + 'include_yes_decisions', + 'master_timeout' + ] + }, + 'cluster.delete_component_template': { + path: [ + 'name' + ], + body: [], + query: [ + 'master_timeout', + 'timeout' + ] + }, + 'cluster.delete_voting_config_exclusions': { + path: [], + body: [], + query: [ + 'master_timeout', + 'wait_for_removal' + ] + }, + 'cluster.exists_component_template': { + path: [ + 'name' + ], + body: [], + query: [ + 'master_timeout', + 'local' + ] + }, + 'cluster.get_component_template': { + path: [ + 'name' + ], + body: [], + query: [ + 'flat_settings', + 'include_defaults', + 'local', + 'master_timeout' + ] + }, + 'cluster.get_settings': { + path: [], + body: [], + query: [ + 'flat_settings', + 'include_defaults', + 'master_timeout', + 'timeout' + ] + }, + 'cluster.health': { + path: [ + 'index' + ], + body: [], + query: [ + 'expand_wildcards', + 'level', + 'local', + 'master_timeout', + 'timeout', + 'wait_for_active_shards', + 'wait_for_events', + 'wait_for_nodes', + 'wait_for_no_initializing_shards', + 'wait_for_no_relocating_shards', + 'wait_for_status' + ] + }, + 'cluster.info': { + path: [ + 'target' + ], + body: [], + query: [] + }, + 'cluster.pending_tasks': { + path: [], + body: [], + query: [ + 'local', + 'master_timeout' + ] + }, + 'cluster.post_voting_config_exclusions': { + path: [], + body: [], + query: [ + 'node_names', + 'node_ids', + 'master_timeout', + 'timeout' + ] + }, + 'cluster.put_component_template': { + path: [ + 'name' + ], + body: [ + 'template', + 'version', + '_meta', + 'deprecated' + ], + query: [ + 'create', + 'master_timeout' + ] + }, + 'cluster.put_settings': { + path: [], + body: [ + 'persistent', + 'transient' + ], + query: [ + 'flat_settings', + 'master_timeout', + 'timeout' + ] + }, + 'cluster.remote_info': { + path: [], + body: [], + query: [] + }, + 'cluster.reroute': { + path: [], + body: [ + 'commands' + ], + query: [ + 'dry_run', + 'explain', + 'metric', + 'retry_failed', + 'master_timeout', + 'timeout' + ] + }, + 'cluster.state': { + path: [ + 'metric', + 'index' + ], + body: [], + query: [ + 'allow_no_indices', + 'expand_wildcards', + 'flat_settings', + 'ignore_unavailable', + 'local', + 'master_timeout', + 'wait_for_metadata_version', + 'wait_for_timeout' + ] + }, + 'cluster.stats': { + path: [ + 'node_id' + ], + body: [], + query: [ + 'include_remotes', + 'timeout' + ] + } + } } /** * Explain the shard allocations. Get explanations for shard allocations in the cluster. For unassigned shards, it provides an explanation for why the shard is unassigned. For assigned shards, it provides an explanation for why the shard is remaining on its current node and has not moved or rebalanced to another node. This API can be very useful when attempting to diagnose why a shard is unassigned or why a shard continues to remain on its current node when you might expect otherwise. 
-   * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-allocation-explain | Elasticsearch API documentation}
+   * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-cluster-allocation-explain | Elasticsearch API documentation}
    */
   async allocationExplain (this: That, params?: T.ClusterAllocationExplainRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.ClusterAllocationExplainResponse>
   async allocationExplain (this: That, params?: T.ClusterAllocationExplainRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.ClusterAllocationExplainResponse, unknown>>
   async allocationExplain (this: That, params?: T.ClusterAllocationExplainRequest, options?: TransportRequestOptions): Promise<T.ClusterAllocationExplainResponse>
   async allocationExplain (this: That, params?: T.ClusterAllocationExplainRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = []
-    const acceptedBody: string[] = ['current_node', 'index', 'primary', 'shard']
+    const {
+      path: acceptedPath,
+      body: acceptedBody,
+      query: acceptedQuery
+    } = this.acceptedParams['cluster.allocation_explain']
+
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -75,8 +255,14 @@ export default class Cluster {
       } else if (acceptedPath.includes(key)) {
         continue
       } else if (key !== 'body' && key !== 'querystring') {
-        // @ts-expect-error
-        querystring[key] = params[key]
+        if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
+          // @ts-expect-error
+          querystring[key] = params[key]
+        } else {
+          body = body ?? {}
+          // @ts-expect-error
+          body[key] = params[key]
+        }
       }
     }
@@ -90,13 +276,16 @@ export default class Cluster {
   /**
    * Delete component templates. Component templates are building blocks for constructing index templates that specify index mappings, settings, and aliases.
-   * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-put-component-template | Elasticsearch API documentation}
+   * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-cluster-put-component-template | Elasticsearch API documentation}
    */
   async deleteComponentTemplate (this: That, params: T.ClusterDeleteComponentTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.ClusterDeleteComponentTemplateResponse>
   async deleteComponentTemplate (this: That, params: T.ClusterDeleteComponentTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.ClusterDeleteComponentTemplateResponse, unknown>>
   async deleteComponentTemplate (this: That, params: T.ClusterDeleteComponentTemplateRequest, options?: TransportRequestOptions): Promise<T.ClusterDeleteComponentTemplateResponse>
   async deleteComponentTemplate (this: That, params: T.ClusterDeleteComponentTemplateRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['name']
+    const {
+      path: acceptedPath
+    } = this.acceptedParams['cluster.delete_component_template']
+
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -132,13 +321,16 @@ export default class Cluster {
   /**
    * Clear cluster voting config exclusions. Remove master-eligible nodes from the voting configuration exclusion list.
-   * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-post-voting-config-exclusions | Elasticsearch API documentation}
+   * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-cluster-post-voting-config-exclusions | Elasticsearch API documentation}
    */
   async deleteVotingConfigExclusions (this: That, params?: T.ClusterDeleteVotingConfigExclusionsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.ClusterDeleteVotingConfigExclusionsResponse>
   async deleteVotingConfigExclusions (this: That, params?: T.ClusterDeleteVotingConfigExclusionsRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.ClusterDeleteVotingConfigExclusionsResponse, unknown>>
   async deleteVotingConfigExclusions (this: That, params?: T.ClusterDeleteVotingConfigExclusionsRequest, options?: TransportRequestOptions): Promise<T.ClusterDeleteVotingConfigExclusionsResponse>
   async deleteVotingConfigExclusions (this: That, params?: T.ClusterDeleteVotingConfigExclusionsRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = []
+    const {
+      path: acceptedPath
+    } = this.acceptedParams['cluster.delete_voting_config_exclusions']
+
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -172,13 +364,16 @@ export default class Cluster {
   /**
    * Check component templates. Returns information about whether a particular component template exists.
-   * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-put-component-template | Elasticsearch API documentation}
+   * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-cluster-put-component-template | Elasticsearch API documentation}
    */
   async existsComponentTemplate (this: That, params: T.ClusterExistsComponentTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.ClusterExistsComponentTemplateResponse>
   async existsComponentTemplate (this: That, params: T.ClusterExistsComponentTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.ClusterExistsComponentTemplateResponse, unknown>>
   async existsComponentTemplate (this: That, params: T.ClusterExistsComponentTemplateRequest, options?: TransportRequestOptions): Promise<T.ClusterExistsComponentTemplateResponse>
   async existsComponentTemplate (this: That, params: T.ClusterExistsComponentTemplateRequest, options?: TransportRequestOptions): Promise<any> {
-    const acceptedPath: string[] = ['name']
+    const {
+      path: acceptedPath
+    } = this.acceptedParams['cluster.exists_component_template']
+
     const userQuery = params?.querystring
     const querystring: Record<string, any> = userQuery != null ? { ...userQuery } : {}
@@ -214,13 +409,16 @@ export default class Cluster {
  /**
   * Get component templates. Get information about component templates.
- * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-put-component-template | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-cluster-put-component-template | Elasticsearch API documentation} */ async getComponentTemplate (this: That, params?: T.ClusterGetComponentTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getComponentTemplate (this: That, params?: T.ClusterGetComponentTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise> async getComponentTemplate (this: That, params?: T.ClusterGetComponentTemplateRequest, options?: TransportRequestOptions): Promise async getComponentTemplate (this: That, params?: T.ClusterGetComponentTemplateRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] + const { + path: acceptedPath + } = this.acceptedParams['cluster.get_component_template'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -264,13 +462,16 @@ export default class Cluster { /** * Get cluster-wide settings. By default, it returns only settings that have been explicitly defined. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-get-settings | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-cluster-get-settings | Elasticsearch API documentation} */ async getSettings (this: That, params?: T.ClusterGetSettingsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getSettings (this: That, params?: T.ClusterGetSettingsRequest, options?: TransportRequestOptionsWithMeta): Promise> async getSettings (this: That, params?: T.ClusterGetSettingsRequest, options?: TransportRequestOptions): Promise async getSettings (this: That, params?: T.ClusterGetSettingsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] + const { + path: acceptedPath + } = this.acceptedParams['cluster.get_settings'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -304,13 +505,16 @@ export default class Cluster { /** * Get the cluster health status. You can also use the API to get the health status of only specified data streams and indices. For data streams, the API retrieves the health status of the stream’s backing indices. The cluster health status is: green, yellow or red. On the shard level, a red status indicates that the specific shard is not allocated in the cluster. Yellow means that the primary shard is allocated but replicas are not. Green means that all shards are allocated. The index level status is controlled by the worst shard status. One of the main benefits of the API is the ability to wait until the cluster reaches a certain high watermark health level. The cluster status is controlled by the worst index status. 
- * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-health | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-cluster-health | Elasticsearch API documentation} */ async health (this: That, params?: T.ClusterHealthRequest, options?: TransportRequestOptionsWithOutMeta): Promise async health (this: That, params?: T.ClusterHealthRequest, options?: TransportRequestOptionsWithMeta): Promise> async health (this: That, params?: T.ClusterHealthRequest, options?: TransportRequestOptions): Promise async health (this: That, params?: T.ClusterHealthRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] + const { + path: acceptedPath + } = this.acceptedParams['cluster.health'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -354,13 +558,16 @@ export default class Cluster { /** * Get cluster info. Returns basic information about the cluster. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-info | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-cluster-info | Elasticsearch API documentation} */ async info (this: That, params: T.ClusterInfoRequest, options?: TransportRequestOptionsWithOutMeta): Promise async info (this: That, params: T.ClusterInfoRequest, options?: TransportRequestOptionsWithMeta): Promise> async info (this: That, params: T.ClusterInfoRequest, options?: TransportRequestOptions): Promise async info (this: That, params: T.ClusterInfoRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['target'] + const { + path: acceptedPath + } = this.acceptedParams['cluster.info'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -396,13 +603,16 @@ export default class Cluster { /** * Get the pending cluster tasks. Get information about cluster-level changes (such as create index, update mapping, allocate or fail shard) that have not yet taken effect. NOTE: This API returns a list of any pending updates to the cluster state. These are distinct from the tasks reported by the task management API which include periodic tasks and tasks initiated by the user, such as node stats, search queries, or create index requests. However, if a user-initiated task such as a create index command causes a cluster state update, the activity of this task might be reported by both task api and pending cluster tasks API. 
- * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-pending-tasks | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-cluster-pending-tasks | Elasticsearch API documentation} */ async pendingTasks (this: That, params?: T.ClusterPendingTasksRequest, options?: TransportRequestOptionsWithOutMeta): Promise async pendingTasks (this: That, params?: T.ClusterPendingTasksRequest, options?: TransportRequestOptionsWithMeta): Promise> async pendingTasks (this: That, params?: T.ClusterPendingTasksRequest, options?: TransportRequestOptions): Promise async pendingTasks (this: That, params?: T.ClusterPendingTasksRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] + const { + path: acceptedPath + } = this.acceptedParams['cluster.pending_tasks'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -436,13 +646,16 @@ export default class Cluster { /** * Update voting configuration exclusions. Update the cluster voting config exclusions by node IDs or node names. By default, if there are more than three master-eligible nodes in the cluster and you remove fewer than half of the master-eligible nodes in the cluster at once, the voting configuration automatically shrinks. If you want to shrink the voting configuration to contain fewer than three nodes or to remove half or more of the master-eligible nodes in the cluster at once, use this API to remove departing nodes from the voting configuration manually. The API adds an entry for each specified node to the cluster’s voting configuration exclusions list. It then waits until the cluster has reconfigured its voting configuration to exclude the specified nodes. Clusters should have no voting configuration exclusions in normal operation. Once the excluded nodes have stopped, clear the voting configuration exclusions with `DELETE /_cluster/voting_config_exclusions`. This API waits for the nodes to be fully removed from the cluster before it returns. If your cluster has voting configuration exclusions for nodes that you no longer intend to remove, use `DELETE /_cluster/voting_config_exclusions?wait_for_removal=false` to clear the voting configuration exclusions without waiting for the nodes to leave the cluster. A response to `POST /_cluster/voting_config_exclusions` with an HTTP status code of 200 OK guarantees that the node has been removed from the voting configuration and will not be reinstated until the voting configuration exclusions are cleared by calling `DELETE /_cluster/voting_config_exclusions`. If the call to `POST /_cluster/voting_config_exclusions` fails or returns a response with an HTTP status code other than 200 OK then the node may not have been removed from the voting configuration. In that case, you may safely retry the call. NOTE: Voting exclusions are required only when you remove at least half of the master-eligible nodes from a cluster in a short time period. They are not required when removing master-ineligible nodes or when removing fewer than half of the master-eligible nodes. 
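// Illustrative sketch of the voting configuration exclusions workflow described above;
// 'node-1' is a placeholder node name and the node URL is a placeholder.
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'https://localhost:9200/' })

// Exclude a departing master-eligible node from the voting configuration...
await client.cluster.postVotingConfigExclusions({ node_names: 'node-1' })
// ...and clear the exclusions once the excluded node has stopped.
await client.cluster.deleteVotingConfigExclusions()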
- * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-post-voting-config-exclusions | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-cluster-post-voting-config-exclusions | Elasticsearch API documentation} */ async postVotingConfigExclusions (this: That, params?: T.ClusterPostVotingConfigExclusionsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async postVotingConfigExclusions (this: That, params?: T.ClusterPostVotingConfigExclusionsRequest, options?: TransportRequestOptionsWithMeta): Promise> async postVotingConfigExclusions (this: That, params?: T.ClusterPostVotingConfigExclusionsRequest, options?: TransportRequestOptions): Promise async postVotingConfigExclusions (this: That, params?: T.ClusterPostVotingConfigExclusionsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] + const { + path: acceptedPath + } = this.acceptedParams['cluster.post_voting_config_exclusions'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -476,14 +689,18 @@ export default class Cluster { /** * Create or update a component template. Component templates are building blocks for constructing index templates that specify index mappings, settings, and aliases. An index template can be composed of multiple component templates. To use a component template, specify it in an index template’s `composed_of` list. Component templates are only applied to new data streams and indices as part of a matching index template. Settings and mappings specified directly in the index template or the create index request override any settings or mappings specified in a component template. Component templates are only used during index creation. For data streams, this includes data stream creation and the creation of a stream’s backing indices. Changes to component templates do not affect existing indices, including a stream’s backing indices. You can use C-style `/* *\/` block comments in component templates. You can include comments anywhere in the request body except before the opening curly bracket. **Applying component templates** You cannot directly apply a component template to a data stream or index. To be applied, a component template must be included in an index template's `composed_of` list. 
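// Illustrative sketch of the component template API described above; the template
// name, settings, and _meta content are placeholders.
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'https://localhost:9200/' })

await client.cluster.putComponentTemplate({
  name: 'my-settings-component',
  template: { settings: { number_of_shards: 1 } },
  _meta: { description: 'shared shard settings for index templates' }
})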
- * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-put-component-template | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-cluster-put-component-template | Elasticsearch API documentation} */ async putComponentTemplate (this: That, params: T.ClusterPutComponentTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise async putComponentTemplate (this: That, params: T.ClusterPutComponentTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise> async putComponentTemplate (this: That, params: T.ClusterPutComponentTemplateRequest, options?: TransportRequestOptions): Promise async putComponentTemplate (this: That, params: T.ClusterPutComponentTemplateRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] - const acceptedBody: string[] = ['template', 'version', '_meta', 'deprecated'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['cluster.put_component_template'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -505,8 +722,14 @@ export default class Cluster { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -523,14 +746,18 @@ export default class Cluster { /** * Update the cluster settings. Configure and update dynamic settings on a running cluster. You can also configure dynamic settings locally on an unstarted or shut down node in `elasticsearch.yml`. Updates made with this API can be persistent, which apply across cluster restarts, or transient, which reset after a cluster restart. You can also reset transient or persistent settings by assigning them a null value. If you configure the same setting using multiple methods, Elasticsearch applies the settings in following order of precedence: 1) Transient setting; 2) Persistent setting; 3) `elasticsearch.yml` setting; 4) Default setting value. For example, you can apply a transient setting to override a persistent setting or `elasticsearch.yml` setting. However, a change to an `elasticsearch.yml` setting will not override a defined transient or persistent setting. TIP: In Elastic Cloud, use the user settings feature to configure all cluster settings. This method automatically rejects unsafe settings that could break your cluster. If you run Elasticsearch on your own hardware, use this API to configure dynamic cluster settings. Only use `elasticsearch.yml` for static cluster settings and node settings. The API doesn’t require a restart and ensures a setting’s value is the same on all nodes. WARNING: Transient cluster settings are no longer recommended. Use persistent cluster settings instead. If a cluster becomes unstable, transient settings can clear unexpectedly, resulting in a potentially undesired cluster configuration. 
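// Illustrative sketch of the cluster settings API described above: a persistent setting
// survives restarts, and assigning null resets it. The setting key is only an example.
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'https://localhost:9200/' })

await client.cluster.putSettings({
  persistent: { 'indices.recovery.max_bytes_per_sec': '50mb' }
})
// Reset the same setting later by assigning null:
await client.cluster.putSettings({
  persistent: { 'indices.recovery.max_bytes_per_sec': null }
})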
- * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-put-settings | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-cluster-put-settings | Elasticsearch API documentation} */ async putSettings (this: That, params?: T.ClusterPutSettingsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async putSettings (this: That, params?: T.ClusterPutSettingsRequest, options?: TransportRequestOptionsWithMeta): Promise> async putSettings (this: That, params?: T.ClusterPutSettingsRequest, options?: TransportRequestOptions): Promise async putSettings (this: That, params?: T.ClusterPutSettingsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const acceptedBody: string[] = ['persistent', 'transient'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['cluster.put_settings'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -553,8 +780,14 @@ export default class Cluster { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -568,13 +801,16 @@ export default class Cluster { /** * Get remote cluster information. Get information about configured remote clusters. The API returns connection and endpoint information keyed by the configured remote cluster alias. > info > This API returns information that reflects current state on the local cluster. > The `connected` field does not necessarily reflect whether a remote cluster is down or unavailable, only whether there is currently an open connection to it. > Elasticsearch does not spontaneously try to reconnect to a disconnected remote cluster. > To trigger a reconnection, attempt a cross-cluster search, ES|QL cross-cluster search, or try the [resolve cluster endpoint](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-resolve-cluster). - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-remote-info | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-cluster-remote-info | Elasticsearch API documentation} */ async remoteInfo (this: That, params?: T.ClusterRemoteInfoRequest, options?: TransportRequestOptionsWithOutMeta): Promise async remoteInfo (this: That, params?: T.ClusterRemoteInfoRequest, options?: TransportRequestOptionsWithMeta): Promise> async remoteInfo (this: That, params?: T.ClusterRemoteInfoRequest, options?: TransportRequestOptions): Promise async remoteInfo (this: That, params?: T.ClusterRemoteInfoRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] + const { + path: acceptedPath + } = this.acceptedParams['cluster.remote_info'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -608,14 +844,18 @@ export default class Cluster { /** * Reroute the cluster. Manually change the allocation of individual shards in the cluster. 
For example, a shard can be moved from one node to another explicitly, an allocation can be canceled, and an unassigned shard can be explicitly allocated to a specific node. It is important to note that after processing any reroute commands Elasticsearch will perform rebalancing as normal (respecting the values of settings such as `cluster.routing.rebalance.enable`) in order to remain in a balanced state. For example, if the requested allocation includes moving a shard from node1 to node2 then this may cause a shard to be moved from node2 back to node1 to even things out. The cluster can be set to disable allocations using the `cluster.routing.allocation.enable` setting. If allocations are disabled then the only allocations that will be performed are explicit ones given using the reroute command, and consequent allocations due to rebalancing. The cluster will attempt to allocate a shard a maximum of `index.allocation.max_retries` times in a row (defaults to `5`), before giving up and leaving the shard unallocated. This scenario can be caused by structural problems such as having an analyzer which refers to a stopwords file which doesn’t exist on all nodes. Once the problem has been corrected, allocation can be manually retried by calling the reroute API with the `?retry_failed` URI query parameter, which will attempt a single retry round for these shards. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-reroute | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-cluster-reroute | Elasticsearch API documentation} */ async reroute (this: That, params?: T.ClusterRerouteRequest, options?: TransportRequestOptionsWithOutMeta): Promise async reroute (this: That, params?: T.ClusterRerouteRequest, options?: TransportRequestOptionsWithMeta): Promise> async reroute (this: That, params?: T.ClusterRerouteRequest, options?: TransportRequestOptions): Promise async reroute (this: That, params?: T.ClusterRerouteRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const acceptedBody: string[] = ['commands'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['cluster.reroute'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -638,8 +878,14 @@ export default class Cluster { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -653,13 +899,16 @@ export default class Cluster { /** * Get the cluster state. Get comprehensive information about the state of the cluster. The cluster state is an internal data structure which keeps track of a variety of information needed by every node, including the identity and attributes of the other nodes in the cluster; cluster-wide settings; index metadata, including the mapping and settings for each index; the location and status of every shard copy in the cluster. The elected master node ensures that every node in the cluster has a copy of the same cluster state. This API lets you retrieve a representation of this internal state for debugging or diagnostic purposes. 
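// Illustrative sketch of the reroute API described above: move a single shard, and
// separately retry allocations that exhausted `index.allocation.max_retries`. The index
// and node names are placeholders.
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'https://localhost:9200/' })

await client.cluster.reroute({
  commands: [{ move: { index: 'my-index', shard: 0, from_node: 'node-1', to_node: 'node-2' } }]
})
// Attempt a single retry round for shards whose allocation previously failed:
await client.cluster.reroute({ retry_failed: true })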
You may need to consult the Elasticsearch source code to determine the precise meaning of the response. By default the API will route requests to the elected master node since this node is the authoritative source of cluster states. You can also retrieve the cluster state held on the node handling the API request by adding the `?local=true` query parameter. Elasticsearch may need to expend significant effort to compute a response to this API in larger clusters, and the response may comprise a very large quantity of data. If you use this API repeatedly, your cluster may become unstable. WARNING: The response is a representation of an internal data structure. Its format is not subject to the same compatibility guarantees as other more stable APIs and may change from version to version. Do not query this API using external monitoring tools. Instead, obtain the information you require using other more stable cluster APIs. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-state | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-cluster-state | Elasticsearch API documentation} */ async state (this: That, params?: T.ClusterStateRequest, options?: TransportRequestOptionsWithOutMeta): Promise async state (this: That, params?: T.ClusterStateRequest, options?: TransportRequestOptionsWithMeta): Promise> async state (this: That, params?: T.ClusterStateRequest, options?: TransportRequestOptions): Promise async state (this: That, params?: T.ClusterStateRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['metric', 'index'] + const { + path: acceptedPath + } = this.acceptedParams['cluster.state'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -707,13 +956,16 @@ export default class Cluster { /** * Get cluster statistics. Get basic index metrics (shard numbers, store size, memory usage) and information about the current nodes that form the cluster (number, roles, os, jvm versions, memory usage, cpu and installed plugins). - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-stats | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-cluster-stats | Elasticsearch API documentation} */ async stats (this: That, params?: T.ClusterStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async stats (this: That, params?: T.ClusterStatsRequest, options?: TransportRequestOptionsWithMeta): Promise> async stats (this: That, params?: T.ClusterStatsRequest, options?: TransportRequestOptions): Promise async stats (this: That, params?: T.ClusterStatsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['node_id'] + const { + path: acceptedPath + } = this.acceptedParams['cluster.stats'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} diff --git a/src/api/api/connector.ts b/src/api/api/connector.ts index 141aa8002..f1ed53880 100644 --- a/src/api/api/connector.ts +++ b/src/api/api/connector.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. 
licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 */ /* eslint-disable import/export */ @@ -35,23 +21,356 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -interface That { transport: Transport } + +interface That { + transport: Transport + acceptedParams: Record +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] export default class Connector { transport: Transport + acceptedParams: Record constructor (transport: Transport) { this.transport = transport + this.acceptedParams = { + 'connector.check_in': { + path: [ + 'connector_id' + ], + body: [], + query: [] + }, + 'connector.delete': { + path: [ + 'connector_id' + ], + body: [], + query: [ + 'delete_sync_jobs', + 'hard' + ] + }, + 'connector.get': { + path: [ + 'connector_id' + ], + body: [], + query: [ + 'include_deleted' + ] + }, + 'connector.last_sync': { + path: [ + 'connector_id' + ], + body: [ + 'last_access_control_sync_error', + 'last_access_control_sync_scheduled_at', + 'last_access_control_sync_status', + 'last_deleted_document_count', + 'last_incremental_sync_scheduled_at', + 'last_indexed_document_count', + 'last_seen', + 'last_sync_error', + 'last_sync_scheduled_at', + 'last_sync_status', + 'last_synced', + 'sync_cursor' + ], + query: [] + }, + 'connector.list': { + path: [], + body: [], + query: [ + 'from', + 'size', + 'index_name', + 'connector_name', + 'service_type', + 'include_deleted', + 'query' + ] + }, + 'connector.post': { + path: [], + body: [ + 'description', + 'index_name', + 'is_native', + 'language', + 'name', + 'service_type' + ], + query: [] + }, + 'connector.put': { + path: [ + 'connector_id' + ], + body: [ + 'description', + 'index_name', + 'is_native', + 'language', + 'name', + 'service_type' + ], + query: [] + }, + 'connector.secret_delete': { + path: [ + 'id' + ], + body: [], + query: [] + }, + 'connector.secret_get': { + path: [ + 'id' + ], + body: [], + query: [] + }, + 'connector.secret_post': { + path: [], + body: [], + query: [] + }, + 'connector.secret_put': { + path: [ + 'id' + ], + body: [], + query: [] + }, + 'connector.sync_job_cancel': { + path: [ + 'connector_sync_job_id' + ], + body: [], + query: [] + }, + 'connector.sync_job_check_in': { + path: [ + 'connector_sync_job_id' + ], + body: [], + query: [] + }, + 'connector.sync_job_claim': { + path: [ + 'connector_sync_job_id' + ], + body: [ + 'sync_cursor', + 'worker_hostname' + ], + query: [] + }, + 'connector.sync_job_delete': { + path: [ + 'connector_sync_job_id' + ], + body: [], + query: [] + }, + 'connector.sync_job_error': { + path: [ + 'connector_sync_job_id' + ], + body: [ + 'error' + ], + query: [] + }, + 'connector.sync_job_get': { + path: [ + 'connector_sync_job_id' + ], + body: [], + query: [] + }, + 'connector.sync_job_list': { + path: [], + body: [], + query: [ + 'from', + 'size', + 'status', + 'connector_id', + 'job_type' + ] + }, + 
'connector.sync_job_post': { + path: [], + body: [ + 'id', + 'job_type', + 'trigger_method' + ], + query: [] + }, + 'connector.sync_job_update_stats': { + path: [ + 'connector_sync_job_id' + ], + body: [ + 'deleted_document_count', + 'indexed_document_count', + 'indexed_document_volume', + 'last_seen', + 'metadata', + 'total_document_count' + ], + query: [] + }, + 'connector.update_active_filtering': { + path: [ + 'connector_id' + ], + body: [], + query: [] + }, + 'connector.update_api_key_id': { + path: [ + 'connector_id' + ], + body: [ + 'api_key_id', + 'api_key_secret_id' + ], + query: [] + }, + 'connector.update_configuration': { + path: [ + 'connector_id' + ], + body: [ + 'configuration', + 'values' + ], + query: [] + }, + 'connector.update_error': { + path: [ + 'connector_id' + ], + body: [ + 'error' + ], + query: [] + }, + 'connector.update_features': { + path: [ + 'connector_id' + ], + body: [ + 'features' + ], + query: [] + }, + 'connector.update_filtering': { + path: [ + 'connector_id' + ], + body: [ + 'filtering', + 'rules', + 'advanced_snippet' + ], + query: [] + }, + 'connector.update_filtering_validation': { + path: [ + 'connector_id' + ], + body: [ + 'validation' + ], + query: [] + }, + 'connector.update_index_name': { + path: [ + 'connector_id' + ], + body: [ + 'index_name' + ], + query: [] + }, + 'connector.update_name': { + path: [ + 'connector_id' + ], + body: [ + 'name', + 'description' + ], + query: [] + }, + 'connector.update_native': { + path: [ + 'connector_id' + ], + body: [ + 'is_native' + ], + query: [] + }, + 'connector.update_pipeline': { + path: [ + 'connector_id' + ], + body: [ + 'pipeline' + ], + query: [] + }, + 'connector.update_scheduling': { + path: [ + 'connector_id' + ], + body: [ + 'scheduling' + ], + query: [] + }, + 'connector.update_service_type': { + path: [ + 'connector_id' + ], + body: [ + 'service_type' + ], + query: [] + }, + 'connector.update_status': { + path: [ + 'connector_id' + ], + body: [ + 'status' + ], + query: [] + } + } } /** * Check in a connector. Update the `last_seen` field in the connector and set it to the current timestamp. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-check-in | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-connector-check-in | Elasticsearch API documentation} */ async checkIn (this: That, params: T.ConnectorCheckInRequest, options?: TransportRequestOptionsWithOutMeta): Promise async checkIn (this: That, params: T.ConnectorCheckInRequest, options?: TransportRequestOptionsWithMeta): Promise> async checkIn (this: That, params: T.ConnectorCheckInRequest, options?: TransportRequestOptions): Promise async checkIn (this: That, params: T.ConnectorCheckInRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['connector_id'] + const { + path: acceptedPath + } = this.acceptedParams['connector.check_in'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -87,13 +406,16 @@ export default class Connector { /** * Delete a connector. Removes a connector and associated sync jobs. This is a destructive action that is not recoverable. NOTE: This action doesn’t delete any API keys, ingest pipelines, or data indices associated with the connector. These need to be removed manually. 
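// Illustrative sketch of the connector delete API described above. Per the
// 'connector.delete' entry in acceptedParams, `connector_id` fills the URL path while
// `delete_sync_jobs` is routed to the querystring. The connector id is a placeholder.
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'https://localhost:9200/' })

await client.connector.delete({ connector_id: 'my-connector', delete_sync_jobs: true })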
- * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-delete | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-connector-delete | Elasticsearch API documentation} */ async delete (this: That, params: T.ConnectorDeleteRequest, options?: TransportRequestOptionsWithOutMeta): Promise async delete (this: That, params: T.ConnectorDeleteRequest, options?: TransportRequestOptionsWithMeta): Promise> async delete (this: That, params: T.ConnectorDeleteRequest, options?: TransportRequestOptions): Promise async delete (this: That, params: T.ConnectorDeleteRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['connector_id'] + const { + path: acceptedPath + } = this.acceptedParams['connector.delete'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -129,13 +451,16 @@ export default class Connector { /** * Get a connector. Get the details about a connector. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-get | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-connector-get | Elasticsearch API documentation} */ async get (this: That, params: T.ConnectorGetRequest, options?: TransportRequestOptionsWithOutMeta): Promise async get (this: That, params: T.ConnectorGetRequest, options?: TransportRequestOptionsWithMeta): Promise> async get (this: That, params: T.ConnectorGetRequest, options?: TransportRequestOptions): Promise async get (this: That, params: T.ConnectorGetRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['connector_id'] + const { + path: acceptedPath + } = this.acceptedParams['connector.get'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -171,14 +496,18 @@ export default class Connector { /** * Update the connector last sync stats. Update the fields related to the last sync of a connector. This action is used for analytics and monitoring. 
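// Illustrative sketch of the last-sync stats API described above; the connector id and
// field values are example placeholders.
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'https://localhost:9200/' })

await client.connector.lastSync({
  connector_id: 'my-connector',
  last_sync_status: 'completed',
  last_indexed_document_count: 1200,
  last_synced: new Date().toISOString()
})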
- * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-last-sync | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-connector-last-sync | Elasticsearch API documentation} */ async lastSync (this: That, params: T.ConnectorLastSyncRequest, options?: TransportRequestOptionsWithOutMeta): Promise async lastSync (this: That, params: T.ConnectorLastSyncRequest, options?: TransportRequestOptionsWithMeta): Promise> async lastSync (this: That, params: T.ConnectorLastSyncRequest, options?: TransportRequestOptions): Promise async lastSync (this: That, params: T.ConnectorLastSyncRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['connector_id'] - const acceptedBody: string[] = ['last_access_control_sync_error', 'last_access_control_sync_scheduled_at', 'last_access_control_sync_status', 'last_deleted_document_count', 'last_incremental_sync_scheduled_at', 'last_indexed_document_count', 'last_seen', 'last_sync_error', 'last_sync_scheduled_at', 'last_sync_status', 'last_synced', 'sync_cursor'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['connector.last_sync'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -200,8 +529,14 @@ export default class Connector { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -218,13 +553,16 @@ export default class Connector { /** * Get all connectors. Get information about all connectors. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-list | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-connector-list | Elasticsearch API documentation} */ async list (this: That, params?: T.ConnectorListRequest, options?: TransportRequestOptionsWithOutMeta): Promise async list (this: That, params?: T.ConnectorListRequest, options?: TransportRequestOptionsWithMeta): Promise> async list (this: That, params?: T.ConnectorListRequest, options?: TransportRequestOptions): Promise async list (this: That, params?: T.ConnectorListRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] + const { + path: acceptedPath + } = this.acceptedParams['connector.list'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -258,14 +596,18 @@ export default class Connector { /** * Create a connector. Connectors are Elasticsearch integrations that bring content from third-party data sources, which can be deployed on Elastic Cloud or hosted on your own infrastructure. Elastic managed connectors (Native connectors) are a managed service on Elastic Cloud. Self-managed connectors (Connector clients) are self-managed on your infrastructure. 
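// Illustrative sketch of creating (or updating) a connector as described above; the
// connector id, index name, display name, and service type are placeholders.
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'https://localhost:9200/' })

await client.connector.put({
  connector_id: 'my-connector',
  index_name: 'search-my-data',
  name: 'My connector',
  service_type: 'sharepoint_online'
})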
- * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-put | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-connector-put | Elasticsearch API documentation} */ async post (this: That, params?: T.ConnectorPostRequest, options?: TransportRequestOptionsWithOutMeta): Promise async post (this: That, params?: T.ConnectorPostRequest, options?: TransportRequestOptionsWithMeta): Promise> async post (this: That, params?: T.ConnectorPostRequest, options?: TransportRequestOptions): Promise async post (this: That, params?: T.ConnectorPostRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const acceptedBody: string[] = ['description', 'index_name', 'is_native', 'language', 'name', 'service_type'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['connector.post'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -288,8 +630,14 @@ export default class Connector { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -303,14 +651,18 @@ export default class Connector { /** * Create or update a connector. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-put | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-connector-put | Elasticsearch API documentation} */ async put (this: That, params?: T.ConnectorPutRequest, options?: TransportRequestOptionsWithOutMeta): Promise async put (this: That, params?: T.ConnectorPutRequest, options?: TransportRequestOptionsWithMeta): Promise> async put (this: That, params?: T.ConnectorPutRequest, options?: TransportRequestOptions): Promise async put (this: That, params?: T.ConnectorPutRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['connector_id'] - const acceptedBody: string[] = ['description', 'index_name', 'is_native', 'language', 'name', 'service_type'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['connector.put'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -333,8 +685,14 @@ export default class Connector { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? 
{} + // @ts-expect-error + body[key] = params[key] + } } } @@ -363,7 +721,10 @@ export default class Connector { async secretDelete (this: That, params?: T.TODO, options?: TransportRequestOptionsWithMeta): Promise> async secretDelete (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise async secretDelete (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id'] + const { + path: acceptedPath + } = this.acceptedParams['connector.secret_delete'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -404,7 +765,10 @@ export default class Connector { async secretGet (this: That, params?: T.TODO, options?: TransportRequestOptionsWithMeta): Promise> async secretGet (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise async secretGet (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id'] + const { + path: acceptedPath + } = this.acceptedParams['connector.secret_get'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -445,7 +809,10 @@ export default class Connector { async secretPost (this: That, params?: T.TODO, options?: TransportRequestOptionsWithMeta): Promise> async secretPost (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise async secretPost (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] + const { + path: acceptedPath + } = this.acceptedParams['connector.secret_post'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -483,7 +850,10 @@ export default class Connector { async secretPut (this: That, params?: T.TODO, options?: TransportRequestOptionsWithMeta): Promise> async secretPut (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise async secretPut (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id'] + const { + path: acceptedPath + } = this.acceptedParams['connector.secret_put'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -519,13 +889,16 @@ export default class Connector { /** * Cancel a connector sync job. Cancel a connector sync job, which sets the status to cancelling and updates `cancellation_requested_at` to the current time. The connector service is then responsible for setting the status of connector sync jobs to cancelled. 
- * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-sync-job-cancel | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-connector-sync-job-cancel | Elasticsearch API documentation} */ async syncJobCancel (this: That, params: T.ConnectorSyncJobCancelRequest, options?: TransportRequestOptionsWithOutMeta): Promise async syncJobCancel (this: That, params: T.ConnectorSyncJobCancelRequest, options?: TransportRequestOptionsWithMeta): Promise> async syncJobCancel (this: That, params: T.ConnectorSyncJobCancelRequest, options?: TransportRequestOptions): Promise async syncJobCancel (this: That, params: T.ConnectorSyncJobCancelRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['connector_sync_job_id'] + const { + path: acceptedPath + } = this.acceptedParams['connector.sync_job_cancel'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -561,13 +934,16 @@ export default class Connector { /** * Check in a connector sync job. Check in a connector sync job and set the `last_seen` field to the current time before updating it in the internal index. To sync data using self-managed connectors, you need to deploy the Elastic connector service on your own infrastructure. This service runs automatically on Elastic Cloud for Elastic managed connectors. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-sync-job-check-in | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-connector-sync-job-check-in | Elasticsearch API documentation} */ async syncJobCheckIn (this: That, params: T.ConnectorSyncJobCheckInRequest, options?: TransportRequestOptionsWithOutMeta): Promise async syncJobCheckIn (this: That, params: T.ConnectorSyncJobCheckInRequest, options?: TransportRequestOptionsWithMeta): Promise> async syncJobCheckIn (this: That, params: T.ConnectorSyncJobCheckInRequest, options?: TransportRequestOptions): Promise async syncJobCheckIn (this: That, params: T.ConnectorSyncJobCheckInRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['connector_sync_job_id'] + const { + path: acceptedPath + } = this.acceptedParams['connector.sync_job_check_in'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -603,14 +979,18 @@ export default class Connector { /** * Claim a connector sync job. This action updates the job status to `in_progress` and sets the `last_seen` and `started_at` timestamps to the current time. Additionally, it can set the `sync_cursor` property for the sync job. This API is not intended for direct connector management by users. It supports the implementation of services that utilize the connector protocol to communicate with Elasticsearch. To sync data using self-managed connectors, you need to deploy the Elastic connector service on your own infrastructure. This service runs automatically on Elastic Cloud for Elastic managed connectors. 
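// Illustrative sketch of claiming a sync job as described above (normally done by the
// connector service rather than end users); the job id and hostname are placeholders.
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'https://localhost:9200/' })

await client.connector.syncJobClaim({
  connector_sync_job_id: 'my-sync-job-id',
  worker_hostname: 'connector-worker-1'
})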
- * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-sync-job-claim | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-connector-sync-job-claim | Elasticsearch API documentation} */ async syncJobClaim (this: That, params: T.ConnectorSyncJobClaimRequest, options?: TransportRequestOptionsWithOutMeta): Promise async syncJobClaim (this: That, params: T.ConnectorSyncJobClaimRequest, options?: TransportRequestOptionsWithMeta): Promise> async syncJobClaim (this: That, params: T.ConnectorSyncJobClaimRequest, options?: TransportRequestOptions): Promise async syncJobClaim (this: That, params: T.ConnectorSyncJobClaimRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['connector_sync_job_id'] - const acceptedBody: string[] = ['sync_cursor', 'worker_hostname'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['connector.sync_job_claim'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -632,8 +1012,14 @@ export default class Connector { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -650,13 +1036,16 @@ export default class Connector { /** * Delete a connector sync job. Remove a connector sync job and its associated data. This is a destructive action that is not recoverable. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-sync-job-delete | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-connector-sync-job-delete | Elasticsearch API documentation} */ async syncJobDelete (this: That, params: T.ConnectorSyncJobDeleteRequest, options?: TransportRequestOptionsWithOutMeta): Promise async syncJobDelete (this: That, params: T.ConnectorSyncJobDeleteRequest, options?: TransportRequestOptionsWithMeta): Promise> async syncJobDelete (this: That, params: T.ConnectorSyncJobDeleteRequest, options?: TransportRequestOptions): Promise async syncJobDelete (this: That, params: T.ConnectorSyncJobDeleteRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['connector_sync_job_id'] + const { + path: acceptedPath + } = this.acceptedParams['connector.sync_job_delete'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -692,14 +1081,18 @@ export default class Connector { /** * Set a connector sync job error. Set the `error` field for a connector sync job and set its `status` to `error`. To sync data using self-managed connectors, you need to deploy the Elastic connector service on your own infrastructure. This service runs automatically on Elastic Cloud for Elastic managed connectors. 
- * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-sync-job-error | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-connector-sync-job-error | Elasticsearch API documentation} */ async syncJobError (this: That, params: T.ConnectorSyncJobErrorRequest, options?: TransportRequestOptionsWithOutMeta): Promise async syncJobError (this: That, params: T.ConnectorSyncJobErrorRequest, options?: TransportRequestOptionsWithMeta): Promise> async syncJobError (this: That, params: T.ConnectorSyncJobErrorRequest, options?: TransportRequestOptions): Promise async syncJobError (this: That, params: T.ConnectorSyncJobErrorRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['connector_sync_job_id'] - const acceptedBody: string[] = ['error'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['connector.sync_job_error'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -721,8 +1114,14 @@ export default class Connector { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -739,13 +1138,16 @@ export default class Connector { /** * Get a connector sync job. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-sync-job-get | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-connector-sync-job-get | Elasticsearch API documentation} */ async syncJobGet (this: That, params: T.ConnectorSyncJobGetRequest, options?: TransportRequestOptionsWithOutMeta): Promise async syncJobGet (this: That, params: T.ConnectorSyncJobGetRequest, options?: TransportRequestOptionsWithMeta): Promise> async syncJobGet (this: That, params: T.ConnectorSyncJobGetRequest, options?: TransportRequestOptions): Promise async syncJobGet (this: That, params: T.ConnectorSyncJobGetRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['connector_sync_job_id'] + const { + path: acceptedPath + } = this.acceptedParams['connector.sync_job_get'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -781,13 +1183,16 @@ export default class Connector { /** * Get all connector sync jobs. Get information about all stored connector sync jobs listed by their creation date in ascending order. 
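// Illustrative sketch of listing connector sync jobs as described above; the filters
// shown map to the 'connector.sync_job_list' query parameters and use example values.
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'https://localhost:9200/' })

const jobs = await client.connector.syncJobList({
  connector_id: 'my-connector',
  status: 'completed',
  size: 10
})
console.log(jobs.count)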
- * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-sync-job-list | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-connector-sync-job-list | Elasticsearch API documentation} */ async syncJobList (this: That, params?: T.ConnectorSyncJobListRequest, options?: TransportRequestOptionsWithOutMeta): Promise async syncJobList (this: That, params?: T.ConnectorSyncJobListRequest, options?: TransportRequestOptionsWithMeta): Promise> async syncJobList (this: That, params?: T.ConnectorSyncJobListRequest, options?: TransportRequestOptions): Promise async syncJobList (this: That, params?: T.ConnectorSyncJobListRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] + const { + path: acceptedPath + } = this.acceptedParams['connector.sync_job_list'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -821,14 +1226,18 @@ export default class Connector { /** * Create a connector sync job. Create a connector sync job document in the internal index and initialize its counters and timestamps with default values. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-sync-job-post | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-connector-sync-job-post | Elasticsearch API documentation} */ async syncJobPost (this: That, params: T.ConnectorSyncJobPostRequest, options?: TransportRequestOptionsWithOutMeta): Promise async syncJobPost (this: That, params: T.ConnectorSyncJobPostRequest, options?: TransportRequestOptionsWithMeta): Promise> async syncJobPost (this: That, params: T.ConnectorSyncJobPostRequest, options?: TransportRequestOptions): Promise async syncJobPost (this: That, params: T.ConnectorSyncJobPostRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const acceptedBody: string[] = ['id', 'job_type', 'trigger_method'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['connector.sync_job_post'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -850,8 +1259,14 @@ export default class Connector { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -865,14 +1280,18 @@ export default class Connector { /** * Set the connector sync job stats. Stats include: `deleted_document_count`, `indexed_document_count`, `indexed_document_volume`, and `total_document_count`. You can also update `last_seen`. This API is mainly used by the connector service for updating sync job information. To sync data using self-managed connectors, you need to deploy the Elastic connector service on your own infrastructure. This service runs automatically on Elastic Cloud for Elastic managed connectors. 
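// Illustrative sketch of updating sync job stats as described above; the counters are
// example values and the sync job id is a placeholder.
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'https://localhost:9200/' })

await client.connector.syncJobUpdateStats({
  connector_sync_job_id: 'my-sync-job-id',
  indexed_document_count: 1000,
  indexed_document_volume: 2048,
  deleted_document_count: 5,
  total_document_count: 1000
})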
- * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-sync-job-update-stats | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-connector-sync-job-update-stats | Elasticsearch API documentation} */ async syncJobUpdateStats (this: That, params: T.ConnectorSyncJobUpdateStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async syncJobUpdateStats (this: That, params: T.ConnectorSyncJobUpdateStatsRequest, options?: TransportRequestOptionsWithMeta): Promise> async syncJobUpdateStats (this: That, params: T.ConnectorSyncJobUpdateStatsRequest, options?: TransportRequestOptions): Promise async syncJobUpdateStats (this: That, params: T.ConnectorSyncJobUpdateStatsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['connector_sync_job_id'] - const acceptedBody: string[] = ['deleted_document_count', 'indexed_document_count', 'indexed_document_volume', 'last_seen', 'metadata', 'total_document_count'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['connector.sync_job_update_stats'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -894,8 +1313,14 @@ export default class Connector { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -912,13 +1337,16 @@ export default class Connector { /** * Activate the connector draft filter. Activates the valid draft filtering for a connector. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-filtering | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-connector-update-filtering | Elasticsearch API documentation} */ async updateActiveFiltering (this: That, params: T.ConnectorUpdateActiveFilteringRequest, options?: TransportRequestOptionsWithOutMeta): Promise async updateActiveFiltering (this: That, params: T.ConnectorUpdateActiveFilteringRequest, options?: TransportRequestOptionsWithMeta): Promise> async updateActiveFiltering (this: That, params: T.ConnectorUpdateActiveFilteringRequest, options?: TransportRequestOptions): Promise async updateActiveFiltering (this: That, params: T.ConnectorUpdateActiveFilteringRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['connector_id'] + const { + path: acceptedPath + } = this.acceptedParams['connector.update_active_filtering'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -954,14 +1382,18 @@ export default class Connector { /** * Update the connector API key ID. Update the `api_key_id` and `api_key_secret_id` fields of a connector. You can specify the ID of the API key used for authorization and the ID of the connector secret where the API key is stored. The connector secret ID is required only for Elastic managed (native) connectors. Self-managed connectors (connector clients) do not use this field. 
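// Illustrative sketch of the API key id update described above; the connector id, API
// key id, and connector secret id are placeholders.
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'https://localhost:9200/' })

await client.connector.updateApiKeyId({
  connector_id: 'my-connector',
  api_key_id: 'my-api-key-id',
  api_key_secret_id: 'my-connector-secret-id'
})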
- * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-api-key-id | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-connector-update-api-key-id | Elasticsearch API documentation} */ async updateApiKeyId (this: That, params: T.ConnectorUpdateApiKeyIdRequest, options?: TransportRequestOptionsWithOutMeta): Promise async updateApiKeyId (this: That, params: T.ConnectorUpdateApiKeyIdRequest, options?: TransportRequestOptionsWithMeta): Promise> async updateApiKeyId (this: That, params: T.ConnectorUpdateApiKeyIdRequest, options?: TransportRequestOptions): Promise async updateApiKeyId (this: That, params: T.ConnectorUpdateApiKeyIdRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['connector_id'] - const acceptedBody: string[] = ['api_key_id', 'api_key_secret_id'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['connector.update_api_key_id'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -983,8 +1415,14 @@ export default class Connector { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -1001,14 +1439,18 @@ export default class Connector { /** * Update the connector configuration. Update the configuration field in the connector document. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-configuration | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-connector-update-configuration | Elasticsearch API documentation} */ async updateConfiguration (this: That, params: T.ConnectorUpdateConfigurationRequest, options?: TransportRequestOptionsWithOutMeta): Promise async updateConfiguration (this: That, params: T.ConnectorUpdateConfigurationRequest, options?: TransportRequestOptionsWithMeta): Promise> async updateConfiguration (this: That, params: T.ConnectorUpdateConfigurationRequest, options?: TransportRequestOptions): Promise async updateConfiguration (this: That, params: T.ConnectorUpdateConfigurationRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['connector_id'] - const acceptedBody: string[] = ['configuration', 'values'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['connector.update_configuration'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1030,8 +1472,14 @@ export default class Connector { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -1048,14 +1496,18 @@ export default class Connector { /** * Update the connector error field. Set the error field for the connector. 
If the error provided in the request body is non-null, the connector’s status is updated to error. Otherwise, if the error is reset to null, the connector status is updated to connected. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-error | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-connector-update-error | Elasticsearch API documentation} */ async updateError (this: That, params: T.ConnectorUpdateErrorRequest, options?: TransportRequestOptionsWithOutMeta): Promise async updateError (this: That, params: T.ConnectorUpdateErrorRequest, options?: TransportRequestOptionsWithMeta): Promise> async updateError (this: That, params: T.ConnectorUpdateErrorRequest, options?: TransportRequestOptions): Promise async updateError (this: That, params: T.ConnectorUpdateErrorRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['connector_id'] - const acceptedBody: string[] = ['error'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['connector.update_error'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1077,8 +1529,14 @@ export default class Connector { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -1095,14 +1553,18 @@ export default class Connector { /** * Update the connector features. Update the connector features in the connector document. This API can be used to control the following aspects of a connector: * document-level security * incremental syncs * advanced sync rules * basic sync rules Normally, the running connector service automatically manages these features. However, you can use this API to override the default behavior. To sync data using self-managed connectors, you need to deploy the Elastic connector service on your own infrastructure. This service runs automatically on Elastic Cloud for Elastic managed connectors. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-features | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-connector-update-features | Elasticsearch API documentation} */ async updateFeatures (this: That, params: T.ConnectorUpdateFeaturesRequest, options?: TransportRequestOptionsWithOutMeta): Promise async updateFeatures (this: That, params: T.ConnectorUpdateFeaturesRequest, options?: TransportRequestOptionsWithMeta): Promise> async updateFeatures (this: That, params: T.ConnectorUpdateFeaturesRequest, options?: TransportRequestOptions): Promise async updateFeatures (this: That, params: T.ConnectorUpdateFeaturesRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['connector_id'] - const acceptedBody: string[] = ['features'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['connector.update_features'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -1124,8 +1586,14 @@ export default class Connector { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -1142,14 +1610,18 @@ export default class Connector { /** * Update the connector filtering. Update the draft filtering configuration of a connector and marks the draft validation state as edited. The filtering draft is activated once validated by the running Elastic connector service. The filtering property is used to configure sync rules (both basic and advanced) for a connector. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-filtering | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-connector-update-filtering | Elasticsearch API documentation} */ async updateFiltering (this: That, params: T.ConnectorUpdateFilteringRequest, options?: TransportRequestOptionsWithOutMeta): Promise async updateFiltering (this: That, params: T.ConnectorUpdateFilteringRequest, options?: TransportRequestOptionsWithMeta): Promise> async updateFiltering (this: That, params: T.ConnectorUpdateFilteringRequest, options?: TransportRequestOptions): Promise async updateFiltering (this: That, params: T.ConnectorUpdateFilteringRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['connector_id'] - const acceptedBody: string[] = ['filtering', 'rules', 'advanced_snippet'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['connector.update_filtering'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1171,8 +1643,14 @@ export default class Connector { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -1189,14 +1667,18 @@ export default class Connector { /** * Update the connector draft filtering validation. Update the draft filtering validation info for a connector. 
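A usage sketch of the connector update methods touched in the hunks above; the request shape for `updateError` follows from its `acceptedParams` entry in this diff (body field `error`), while the node URL and connector id are placeholders. As described a little earlier, resetting the error to null moves the connector back to `connected`.

import { Client } from '@elastic/elasticsearch'

async function flagAndClearConnectorError (): Promise<void> {
  const client = new Client({ node: 'http://localhost:9200/' })

  // Record a sync failure on the connector, then clear it again.
  await client.connector.updateError({ connector_id: 'my-connector', error: 'sync failed' })
  await client.connector.updateError({ connector_id: 'my-connector', error: null })
}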
- * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/update-connector-filtering-validation-api.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-connector-update-filtering-validation | Elasticsearch API documentation} */ async updateFilteringValidation (this: That, params: T.ConnectorUpdateFilteringValidationRequest, options?: TransportRequestOptionsWithOutMeta): Promise async updateFilteringValidation (this: That, params: T.ConnectorUpdateFilteringValidationRequest, options?: TransportRequestOptionsWithMeta): Promise> async updateFilteringValidation (this: That, params: T.ConnectorUpdateFilteringValidationRequest, options?: TransportRequestOptions): Promise async updateFilteringValidation (this: That, params: T.ConnectorUpdateFilteringValidationRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['connector_id'] - const acceptedBody: string[] = ['validation'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['connector.update_filtering_validation'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1218,8 +1700,14 @@ export default class Connector { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -1236,14 +1724,18 @@ export default class Connector { /** * Update the connector index name. Update the `index_name` field of a connector, specifying the index where the data ingested by the connector is stored. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-index-name | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-connector-update-index-name | Elasticsearch API documentation} */ async updateIndexName (this: That, params: T.ConnectorUpdateIndexNameRequest, options?: TransportRequestOptionsWithOutMeta): Promise async updateIndexName (this: That, params: T.ConnectorUpdateIndexNameRequest, options?: TransportRequestOptionsWithMeta): Promise> async updateIndexName (this: That, params: T.ConnectorUpdateIndexNameRequest, options?: TransportRequestOptions): Promise async updateIndexName (this: That, params: T.ConnectorUpdateIndexNameRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['connector_id'] - const acceptedBody: string[] = ['index_name'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['connector.update_index_name'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1265,8 +1757,14 @@ export default class Connector { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? 
{} + // @ts-expect-error + body[key] = params[key] + } } } @@ -1283,14 +1781,18 @@ export default class Connector { /** * Update the connector name and description. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-name | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-connector-update-name | Elasticsearch API documentation} */ async updateName (this: That, params: T.ConnectorUpdateNameRequest, options?: TransportRequestOptionsWithOutMeta): Promise async updateName (this: That, params: T.ConnectorUpdateNameRequest, options?: TransportRequestOptionsWithMeta): Promise> async updateName (this: That, params: T.ConnectorUpdateNameRequest, options?: TransportRequestOptions): Promise async updateName (this: That, params: T.ConnectorUpdateNameRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['connector_id'] - const acceptedBody: string[] = ['name', 'description'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['connector.update_name'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1312,8 +1814,14 @@ export default class Connector { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -1330,14 +1838,18 @@ export default class Connector { /** * Update the connector is_native flag. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/update-connector-native-api.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-connector-update-native | Elasticsearch API documentation} */ async updateNative (this: That, params: T.ConnectorUpdateNativeRequest, options?: TransportRequestOptionsWithOutMeta): Promise async updateNative (this: That, params: T.ConnectorUpdateNativeRequest, options?: TransportRequestOptionsWithMeta): Promise> async updateNative (this: That, params: T.ConnectorUpdateNativeRequest, options?: TransportRequestOptions): Promise async updateNative (this: That, params: T.ConnectorUpdateNativeRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['connector_id'] - const acceptedBody: string[] = ['is_native'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['connector.update_native'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1359,8 +1871,14 @@ export default class Connector { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -1377,14 +1895,18 @@ export default class Connector { /** * Update the connector pipeline. When you create a new connector, the configuration of an ingest pipeline is populated with default settings. 
- * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-pipeline | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-connector-update-pipeline | Elasticsearch API documentation} */ async updatePipeline (this: That, params: T.ConnectorUpdatePipelineRequest, options?: TransportRequestOptionsWithOutMeta): Promise async updatePipeline (this: That, params: T.ConnectorUpdatePipelineRequest, options?: TransportRequestOptionsWithMeta): Promise> async updatePipeline (this: That, params: T.ConnectorUpdatePipelineRequest, options?: TransportRequestOptions): Promise async updatePipeline (this: That, params: T.ConnectorUpdatePipelineRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['connector_id'] - const acceptedBody: string[] = ['pipeline'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['connector.update_pipeline'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1406,8 +1928,14 @@ export default class Connector { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -1424,14 +1952,18 @@ export default class Connector { /** * Update the connector scheduling. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-scheduling | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-connector-update-scheduling | Elasticsearch API documentation} */ async updateScheduling (this: That, params: T.ConnectorUpdateSchedulingRequest, options?: TransportRequestOptionsWithOutMeta): Promise async updateScheduling (this: That, params: T.ConnectorUpdateSchedulingRequest, options?: TransportRequestOptionsWithMeta): Promise> async updateScheduling (this: That, params: T.ConnectorUpdateSchedulingRequest, options?: TransportRequestOptions): Promise async updateScheduling (this: That, params: T.ConnectorUpdateSchedulingRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['connector_id'] - const acceptedBody: string[] = ['scheduling'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['connector.update_scheduling'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1453,8 +1985,14 @@ export default class Connector { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -1471,14 +2009,18 @@ export default class Connector { /** * Update the connector service type. 
- * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-service-type | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-connector-update-service-type | Elasticsearch API documentation} */ async updateServiceType (this: That, params: T.ConnectorUpdateServiceTypeRequest, options?: TransportRequestOptionsWithOutMeta): Promise async updateServiceType (this: That, params: T.ConnectorUpdateServiceTypeRequest, options?: TransportRequestOptionsWithMeta): Promise> async updateServiceType (this: That, params: T.ConnectorUpdateServiceTypeRequest, options?: TransportRequestOptions): Promise async updateServiceType (this: That, params: T.ConnectorUpdateServiceTypeRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['connector_id'] - const acceptedBody: string[] = ['service_type'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['connector.update_service_type'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1500,8 +2042,14 @@ export default class Connector { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -1518,14 +2066,18 @@ export default class Connector { /** * Update the connector status. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-connector-update-status | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-connector-update-status | Elasticsearch API documentation} */ async updateStatus (this: That, params: T.ConnectorUpdateStatusRequest, options?: TransportRequestOptionsWithOutMeta): Promise async updateStatus (this: That, params: T.ConnectorUpdateStatusRequest, options?: TransportRequestOptionsWithMeta): Promise> async updateStatus (this: That, params: T.ConnectorUpdateStatusRequest, options?: TransportRequestOptions): Promise async updateStatus (this: That, params: T.ConnectorUpdateStatusRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['connector_id'] - const acceptedBody: string[] = ['status'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['connector.update_status'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1547,8 +2099,14 @@ export default class Connector { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } diff --git a/src/api/api/count.ts b/src/api/api/count.ts index 6e060b369..69b85545f 100644 --- a/src/api/api/count.ts +++ b/src/api/api/count.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. 
See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 */ /* eslint-disable import/export */ @@ -35,18 +21,54 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -interface That { transport: Transport } + +interface That { + transport: Transport +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] + +const acceptedParams: Record = { + count: { + path: [ + 'index' + ], + body: [ + 'query' + ], + query: [ + 'allow_no_indices', + 'analyzer', + 'analyze_wildcard', + 'default_operator', + 'df', + 'expand_wildcards', + 'ignore_throttled', + 'ignore_unavailable', + 'lenient', + 'min_score', + 'preference', + 'routing', + 'terminate_after', + 'q' + ] + } +} /** * Count search results. Get the number of documents matching a query. The query can be provided either by using a simple query string as a parameter, or by defining Query DSL within the request body. The query is optional. When no query is provided, the API uses `match_all` to count all the documents. The count API supports multi-target syntax. You can run a single count API search across multiple data streams and indices. The operation is broadcast across all shards. For each shard ID group, a replica is chosen and the search is run against it. This means that replicas increase the scalability of the count. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-count | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-count | Elasticsearch API documentation} */ export default async function CountApi (this: That, params?: T.CountRequest, options?: TransportRequestOptionsWithOutMeta): Promise export default async function CountApi (this: That, params?: T.CountRequest, options?: TransportRequestOptionsWithMeta): Promise> export default async function CountApi (this: That, params?: T.CountRequest, options?: TransportRequestOptions): Promise export default async function CountApi (this: That, params?: T.CountRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] - const acceptedBody: string[] = ['query'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = acceptedParams.count + const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -69,8 +91,14 @@ export default async function CountApi (this: That, params?: T.CountRequest, opt } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } diff --git a/src/api/api/create.ts b/src/api/api/create.ts index c8c663fa3..c63a5abf5 100644 --- a/src/api/api/create.ts +++ b/src/api/api/create.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 */ /* eslint-disable import/export */ @@ -35,18 +21,54 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -interface That { transport: Transport } + +interface That { + transport: Transport +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] + +const acceptedParams: Record = { + create: { + path: [ + 'id', + 'index' + ], + body: [ + 'document' + ], + query: [ + 'if_primary_term', + 'if_seq_no', + 'include_source_on_error', + 'op_type', + 'pipeline', + 'refresh', + 'require_alias', + 'require_data_stream', + 'routing', + 'timeout', + 'version', + 'version_type', + 'wait_for_active_shards' + ] + } +} /** * Create a new document in the index. You can index a new JSON document with the `//_doc/` or `//_create/<_id>` APIs Using `_create` guarantees that the document is indexed only if it does not already exist. It returns a 409 response when a document with a same ID already exists in the index. To update an existing document, you must use the `//_doc/` API. If the Elasticsearch security features are enabled, you must have the following index privileges for the target data stream, index, or index alias: * To add a document using the `PUT //_create/<_id>` or `POST //_create/<_id>` request formats, you must have the `create_doc`, `create`, `index`, or `write` index privilege. * To automatically create a data stream or index with this API request, you must have the `auto_configure`, `create_index`, or `manage` index privilege. Automatic data stream creation requires a matching index template with data stream enabled. **Automatically create data streams and indices** If the request's target doesn't exist and matches an index template with a `data_stream` definition, the index operation automatically creates the data stream. If the target doesn't exist and doesn't match a data stream template, the operation automatically creates the index and applies any matching index templates. 
NOTE: Elasticsearch includes several built-in index templates. To avoid naming collisions with these templates, refer to index pattern documentation. If no mapping exists, the index operation creates a dynamic mapping. By default, new fields and objects are automatically added to the mapping if needed. Automatic index creation is controlled by the `action.auto_create_index` setting. If it is `true`, any index can be created automatically. You can modify this setting to explicitly allow or block automatic creation of indices that match specified patterns or set it to `false` to turn off automatic index creation entirely. Specify a comma-separated list of patterns you want to allow or prefix each pattern with `+` or `-` to indicate whether it should be allowed or blocked. When a list is specified, the default behaviour is to disallow. NOTE: The `action.auto_create_index` setting affects the automatic creation of indices only. It does not affect the creation of data streams. **Routing** By default, shard placement — or routing — is controlled by using a hash of the document's ID value. For more explicit control, the value fed into the hash function used by the router can be directly specified on a per-operation basis using the `routing` parameter. When setting up explicit mapping, you can also use the `_routing` field to direct the index operation to extract the routing value from the document itself. This does come at the (very minimal) cost of an additional document parsing pass. If the `_routing` mapping is defined and set to be required, the index operation will fail if no routing value is provided or extracted. NOTE: Data streams do not support custom routing unless they were created with the `allow_custom_routing` setting enabled in the template. **Distributed** The index operation is directed to the primary shard based on its route and performed on the actual node containing this shard. After the primary shard completes the operation, if needed, the update is distributed to applicable replicas. **Active shards** To improve the resiliency of writes to the system, indexing operations can be configured to wait for a certain number of active shard copies before proceeding with the operation. If the requisite number of active shard copies are not available, then the write operation must wait and retry, until either the requisite shard copies have started or a timeout occurs. By default, write operations only wait for the primary shards to be active before proceeding (that is to say `wait_for_active_shards` is `1`). This default can be overridden in the index settings dynamically by setting `index.write.wait_for_active_shards`. To alter this behavior per operation, use the `wait_for_active_shards request` parameter. Valid values are all or any positive integer up to the total number of configured copies per shard in the index (which is `number_of_replicas`+1). Specifying a negative value or a number greater than the number of shard copies will throw an error. For example, suppose you have a cluster of three nodes, A, B, and C and you create an index index with the number of replicas set to 3 (resulting in 4 shard copies, one more copy than there are nodes). If you attempt an indexing operation, by default the operation will only ensure the primary copy of each shard is available before proceeding. This means that even if B and C went down and A hosted the primary shard copies, the indexing operation would still proceed with only one copy of the data. 
If `wait_for_active_shards` is set on the request to `3` (and all three nodes are up), the indexing operation will require 3 active shard copies before proceeding. This requirement should be met because there are 3 active nodes in the cluster, each one holding a copy of the shard. However, if you set `wait_for_active_shards` to `all` (or to `4`, which is the same in this situation), the indexing operation will not proceed as you do not have all 4 copies of each shard active in the index. The operation will timeout unless a new node is brought up in the cluster to host the fourth copy of the shard. It is important to note that this setting greatly reduces the chances of the write operation not writing to the requisite number of shard copies, but it does not completely eliminate the possibility, because this check occurs before the write operation starts. After the write operation is underway, it is still possible for replication to fail on any number of shard copies but still succeed on the primary. The `_shards` section of the API response reveals the number of shard copies on which replication succeeded and failed. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-create | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-create | Elasticsearch API documentation} */ export default async function CreateApi (this: That, params: T.CreateRequest, options?: TransportRequestOptionsWithOutMeta): Promise export default async function CreateApi (this: That, params: T.CreateRequest, options?: TransportRequestOptionsWithMeta): Promise> export default async function CreateApi (this: That, params: T.CreateRequest, options?: TransportRequestOptions): Promise export default async function CreateApi (this: That, params: T.CreateRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id', 'index'] - const acceptedBody: string[] = ['document'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = acceptedParams.create + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -58,8 +80,14 @@ export default async function CreateApi (this: That, params } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } diff --git a/src/api/api/dangling_indices.ts b/src/api/api/dangling_indices.ts index e8dc5399d..b58b65f9c 100644 --- a/src/api/api/dangling_indices.ts +++ b/src/api/api/dangling_indices.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 */ /* eslint-disable import/export */ @@ -35,23 +21,60 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -interface That { transport: Transport } + +interface That { + transport: Transport + acceptedParams: Record +} export default class DanglingIndices { transport: Transport + acceptedParams: Record constructor (transport: Transport) { this.transport = transport + this.acceptedParams = { + 'dangling_indices.delete_dangling_index': { + path: [ + 'index_uuid' + ], + body: [], + query: [ + 'accept_data_loss', + 'master_timeout', + 'timeout' + ] + }, + 'dangling_indices.import_dangling_index': { + path: [ + 'index_uuid' + ], + body: [], + query: [ + 'accept_data_loss', + 'master_timeout', + 'timeout' + ] + }, + 'dangling_indices.list_dangling_indices': { + path: [], + body: [], + query: [] + } + } } /** * Delete a dangling index. If Elasticsearch encounters index data that is absent from the current cluster state, those indices are considered to be dangling. For example, this can happen if you delete more than `cluster.indices.tombstones.size` indices while an Elasticsearch node is offline. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-dangling-indices-delete-dangling-index | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-dangling-indices-delete-dangling-index | Elasticsearch API documentation} */ async deleteDanglingIndex (this: That, params: T.DanglingIndicesDeleteDanglingIndexRequest, options?: TransportRequestOptionsWithOutMeta): Promise async deleteDanglingIndex (this: That, params: T.DanglingIndicesDeleteDanglingIndexRequest, options?: TransportRequestOptionsWithMeta): Promise> async deleteDanglingIndex (this: That, params: T.DanglingIndicesDeleteDanglingIndexRequest, options?: TransportRequestOptions): Promise async deleteDanglingIndex (this: That, params: T.DanglingIndicesDeleteDanglingIndexRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index_uuid'] + const { + path: acceptedPath + } = this.acceptedParams['dangling_indices.delete_dangling_index'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -87,13 +110,16 @@ export default class DanglingIndices { /** * Import a dangling index. If Elasticsearch encounters index data that is absent from the current cluster state, those indices are considered to be dangling. For example, this can happen if you delete more than `cluster.indices.tombstones.size` indices while an Elasticsearch node is offline. 
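A usage sketch of the dangling-indices helpers described above. The response field names (`dangling_indices`, `index_uuid`) follow the Elasticsearch API, the node URL is a placeholder, and `accept_data_loss` is the required acknowledgement listed among the query parameters in this diff.

import { Client } from '@elastic/elasticsearch'

async function purgeDanglingIndices (): Promise<void> {
  const client = new Client({ node: 'http://localhost:9200/' })

  // List dangling indices, then delete each one, explicitly accepting data loss.
  const resp = await client.danglingIndices.listDanglingIndices()
  for (const idx of resp.dangling_indices ?? []) {
    await client.danglingIndices.deleteDanglingIndex({
      index_uuid: idx.index_uuid,
      accept_data_loss: true
    })
  }
}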
- * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-dangling-indices-import-dangling-index | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-dangling-indices-import-dangling-index | Elasticsearch API documentation} */ async importDanglingIndex (this: That, params: T.DanglingIndicesImportDanglingIndexRequest, options?: TransportRequestOptionsWithOutMeta): Promise async importDanglingIndex (this: That, params: T.DanglingIndicesImportDanglingIndexRequest, options?: TransportRequestOptionsWithMeta): Promise> async importDanglingIndex (this: That, params: T.DanglingIndicesImportDanglingIndexRequest, options?: TransportRequestOptions): Promise async importDanglingIndex (this: That, params: T.DanglingIndicesImportDanglingIndexRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index_uuid'] + const { + path: acceptedPath + } = this.acceptedParams['dangling_indices.import_dangling_index'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -129,13 +155,16 @@ export default class DanglingIndices { /** * Get the dangling indices. If Elasticsearch encounters index data that is absent from the current cluster state, those indices are considered to be dangling. For example, this can happen if you delete more than `cluster.indices.tombstones.size` indices while an Elasticsearch node is offline. Use this API to list dangling indices, which you can then import or delete. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-dangling-indices-list-dangling-indices | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-dangling-indices-list-dangling-indices | Elasticsearch API documentation} */ async listDanglingIndices (this: That, params?: T.DanglingIndicesListDanglingIndicesRequest, options?: TransportRequestOptionsWithOutMeta): Promise async listDanglingIndices (this: That, params?: T.DanglingIndicesListDanglingIndicesRequest, options?: TransportRequestOptionsWithMeta): Promise> async listDanglingIndices (this: That, params?: T.DanglingIndicesListDanglingIndicesRequest, options?: TransportRequestOptions): Promise async listDanglingIndices (this: That, params?: T.DanglingIndicesListDanglingIndicesRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] + const { + path: acceptedPath + } = this.acceptedParams['dangling_indices.list_dangling_indices'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} diff --git a/src/api/api/delete.ts b/src/api/api/delete.ts index 63b4cf22b..8a207e817 100644 --- a/src/api/api/delete.ts +++ b/src/api/api/delete.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 */ /* eslint-disable import/export */ @@ -35,17 +21,43 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -interface That { transport: Transport } + +interface That { + transport: Transport +} + +const acceptedParams: Record = { + delete: { + path: [ + 'id', + 'index' + ], + body: [], + query: [ + 'if_primary_term', + 'if_seq_no', + 'refresh', + 'routing', + 'timeout', + 'version', + 'version_type', + 'wait_for_active_shards' + ] + } +} /** * Delete a document. Remove a JSON document from the specified index. NOTE: You cannot send deletion requests directly to a data stream. To delete a document in a data stream, you must target the backing index containing the document. **Optimistic concurrency control** Delete operations can be made conditional and only be performed if the last modification to the document was assigned the sequence number and primary term specified by the `if_seq_no` and `if_primary_term` parameters. If a mismatch is detected, the operation will result in a `VersionConflictException` and a status code of `409`. **Versioning** Each document indexed is versioned. When deleting a document, the version can be specified to make sure the relevant document you are trying to delete is actually being deleted and it has not changed in the meantime. Every write operation run on a document, deletes included, causes its version to be incremented. The version number of a deleted document remains available for a short time after deletion to allow for control of concurrent operations. The length of time for which a deleted document's version remains available is determined by the `index.gc_deletes` index setting. **Routing** If routing is used during indexing, the routing value also needs to be specified to delete a document. If the `_routing` mapping is set to `required` and no routing value is specified, the delete API throws a `RoutingMissingException` and rejects the request. For example: ``` DELETE /my-index-000001/_doc/1?routing=shard-1 ``` This request deletes the document with ID 1, but it is routed based on the user. The document is not deleted if the correct routing is not specified. **Distributed** The delete operation gets hashed into a specific shard ID. It then gets redirected into the primary shard within that ID group and replicated (if needed) to shard replicas within that ID group. 
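A usage sketch of the optimistic-concurrency flow described above, assuming placeholder index and document ids; `_seq_no` and `_primary_term` come back on the get response, and a mismatch on delete yields a 409 version conflict.

import { Client } from '@elastic/elasticsearch'

async function deleteIfUnchanged (): Promise<void> {
  const client = new Client({ node: 'http://localhost:9200/' })

  // Read the document, then delete it only if it has not been modified since.
  const doc = await client.get({ index: 'my-index-000001', id: '1' })
  await client.delete({
    index: 'my-index-000001',
    id: '1',
    if_seq_no: doc._seq_no,
    if_primary_term: doc._primary_term
  })
}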
- * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-delete | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-delete | Elasticsearch API documentation} */ export default async function DeleteApi (this: That, params: T.DeleteRequest, options?: TransportRequestOptionsWithOutMeta): Promise export default async function DeleteApi (this: That, params: T.DeleteRequest, options?: TransportRequestOptionsWithMeta): Promise> export default async function DeleteApi (this: That, params: T.DeleteRequest, options?: TransportRequestOptions): Promise export default async function DeleteApi (this: That, params: T.DeleteRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id', 'index'] + const { + path: acceptedPath + } = acceptedParams.delete + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} diff --git a/src/api/api/delete_by_query.ts b/src/api/api/delete_by_query.ts index f99e09670..4604c0ac1 100644 --- a/src/api/api/delete_by_query.ts +++ b/src/api/api/delete_by_query.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 */ /* eslint-disable import/export */ @@ -35,18 +21,71 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -interface That { transport: Transport } + +interface That { + transport: Transport +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] + +const acceptedParams: Record = { + delete_by_query: { + path: [ + 'index' + ], + body: [ + 'max_docs', + 'query', + 'slice' + ], + query: [ + 'allow_no_indices', + 'analyzer', + 'analyze_wildcard', + 'conflicts', + 'default_operator', + 'df', + 'expand_wildcards', + 'from', + 'ignore_unavailable', + 'lenient', + 'max_docs', + 'preference', + 'refresh', + 'request_cache', + 'requests_per_second', + 'routing', + 'q', + 'scroll', + 'scroll_size', + 'search_timeout', + 'search_type', + 'slices', + 'sort', + 'stats', + 'terminate_after', + 'timeout', + 'version', + 'wait_for_active_shards', + 'wait_for_completion' + ] + } +} /** * Delete documents. Deletes documents that match the specified query. If the Elasticsearch security features are enabled, you must have the following index privileges for the target data stream, index, or alias: * `read` * `delete` or `write` You can specify the query criteria in the request URI or the request body using the same syntax as the search API. 
When you submit a delete by query request, Elasticsearch gets a snapshot of the data stream or index when it begins processing the request and deletes matching documents using internal versioning. If a document changes between the time that the snapshot is taken and the delete operation is processed, it results in a version conflict and the delete operation fails. NOTE: Documents with a version equal to 0 cannot be deleted using delete by query because internal versioning does not support 0 as a valid version number. While processing a delete by query request, Elasticsearch performs multiple search requests sequentially to find all of the matching documents to delete. A bulk delete request is performed for each batch of matching documents. If a search or bulk request is rejected, the requests are retried up to 10 times, with exponential back off. If the maximum retry limit is reached, processing halts and all failed requests are returned in the response. Any delete requests that completed successfully still stick, they are not rolled back. You can opt to count version conflicts instead of halting and returning by setting `conflicts` to `proceed`. Note that if you opt to count version conflicts the operation could attempt to delete more documents from the source than `max_docs` until it has successfully deleted `max_docs documents`, or it has gone through every document in the source query. **Throttling delete requests** To control the rate at which delete by query issues batches of delete operations, you can set `requests_per_second` to any positive decimal number. This pads each batch with a wait time to throttle the rate. Set `requests_per_second` to `-1` to disable throttling. Throttling uses a wait time between batches so that the internal scroll requests can be given a timeout that takes the request padding into account. The padding time is the difference between the batch size divided by the `requests_per_second` and the time spent writing. By default the batch size is `1000`, so if `requests_per_second` is set to `500`: ``` target_time = 1000 / 500 per second = 2 seconds wait_time = target_time - write_time = 2 seconds - .5 seconds = 1.5 seconds ``` Since the batch is issued as a single `_bulk` request, large batch sizes cause Elasticsearch to create many requests and wait before starting the next set. This is "bursty" instead of "smooth". **Slicing** Delete by query supports sliced scroll to parallelize the delete process. This can improve efficiency and provide a convenient way to break the request down into smaller parts. Setting `slices` to `auto` lets Elasticsearch choose the number of slices to use. This setting will use one slice per shard, up to a certain limit. If there are multiple source data streams or indices, it will choose the number of slices based on the index or backing index with the smallest number of shards. Adding slices to the delete by query operation creates sub-requests which means it has some quirks: * You can see these requests in the tasks APIs. These sub-requests are "child" tasks of the task for the request with slices. * Fetching the status of the task for the request with slices only contains the status of completed slices. * These sub-requests are individually addressable for things like cancellation and rethrottling. * Rethrottling the request with `slices` will rethrottle the unfinished sub-request proportionally. * Canceling the request with `slices` will cancel each sub-request. 
* Due to the nature of `slices` each sub-request won't get a perfectly even portion of the documents. All documents will be addressed, but some slices may be larger than others. Expect larger slices to have a more even distribution. * Parameters like `requests_per_second` and `max_docs` on a request with `slices` are distributed proportionally to each sub-request. Combine that with the earlier point about distribution being uneven and you should conclude that using `max_docs` with `slices` might not result in exactly `max_docs` documents being deleted. * Each sub-request gets a slightly different snapshot of the source data stream or index though these are all taken at approximately the same time. If you're slicing manually or otherwise tuning automatic slicing, keep in mind that: * Query performance is most efficient when the number of slices is equal to the number of shards in the index or backing index. If that number is large (for example, 500), choose a lower number as too many `slices` hurts performance. Setting `slices` higher than the number of shards generally does not improve efficiency and adds overhead. * Delete performance scales linearly across available resources with the number of slices. Whether query or delete performance dominates the runtime depends on the documents being reindexed and cluster resources. **Cancel a delete by query operation** Any delete by query can be canceled using the task cancel API. For example: ``` POST _tasks/r1A2WoRbTwKZ516z6NEs5A:36619/_cancel ``` The task ID can be found by using the get tasks API. Cancellation should happen quickly but might take a few seconds. The get task status API will continue to list the delete by query task until this task checks that it has been cancelled and terminates itself. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-delete-by-query | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-delete-by-query | Elasticsearch API documentation} */ export default async function DeleteByQueryApi (this: That, params: T.DeleteByQueryRequest, options?: TransportRequestOptionsWithOutMeta): Promise export default async function DeleteByQueryApi (this: That, params: T.DeleteByQueryRequest, options?: TransportRequestOptionsWithMeta): Promise> export default async function DeleteByQueryApi (this: That, params: T.DeleteByQueryRequest, options?: TransportRequestOptions): Promise export default async function DeleteByQueryApi (this: That, params: T.DeleteByQueryRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] - const acceptedBody: string[] = ['max_docs', 'query', 'slice'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = acceptedParams.delete_by_query + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -68,8 +107,14 @@ export default async function DeleteByQueryApi (this: That, params: T.DeleteByQu } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? 
{} + // @ts-expect-error + body[key] = params[key] + } } } diff --git a/src/api/api/delete_by_query_rethrottle.ts b/src/api/api/delete_by_query_rethrottle.ts index 4da430635..bcb21062d 100644 --- a/src/api/api/delete_by_query_rethrottle.ts +++ b/src/api/api/delete_by_query_rethrottle.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 */ /* eslint-disable import/export */ @@ -35,17 +21,35 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -interface That { transport: Transport } + +interface That { + transport: Transport +} + +const acceptedParams: Record = { + delete_by_query_rethrottle: { + path: [ + 'task_id' + ], + body: [], + query: [ + 'requests_per_second' + ] + } +} /** * Throttle a delete by query operation. Change the number of requests per second for a particular delete by query operation. Rethrottling that speeds up the query takes effect immediately but rethrotting that slows down the query takes effect after completing the current batch to prevent scroll timeouts. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-delete-by-query-rethrottle | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-delete-by-query-rethrottle | Elasticsearch API documentation} */ export default async function DeleteByQueryRethrottleApi (this: That, params: T.DeleteByQueryRethrottleRequest, options?: TransportRequestOptionsWithOutMeta): Promise export default async function DeleteByQueryRethrottleApi (this: That, params: T.DeleteByQueryRethrottleRequest, options?: TransportRequestOptionsWithMeta): Promise> export default async function DeleteByQueryRethrottleApi (this: That, params: T.DeleteByQueryRethrottleRequest, options?: TransportRequestOptions): Promise export default async function DeleteByQueryRethrottleApi (this: That, params: T.DeleteByQueryRethrottleRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['task_id'] + const { + path: acceptedPath + } = acceptedParams.delete_by_query_rethrottle + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} diff --git a/src/api/api/delete_script.ts b/src/api/api/delete_script.ts index e6519dffd..d7761c571 100644 --- a/src/api/api/delete_script.ts +++ b/src/api/api/delete_script.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. 
licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 */ /* eslint-disable import/export */ @@ -35,17 +21,36 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -interface That { transport: Transport } + +interface That { + transport: Transport +} + +const acceptedParams: Record = { + delete_script: { + path: [ + 'id' + ], + body: [], + query: [ + 'master_timeout', + 'timeout' + ] + } +} /** * Delete a script or search template. Deletes a stored script or search template. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-delete-script | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-delete-script | Elasticsearch API documentation} */ export default async function DeleteScriptApi (this: That, params: T.DeleteScriptRequest, options?: TransportRequestOptionsWithOutMeta): Promise export default async function DeleteScriptApi (this: That, params: T.DeleteScriptRequest, options?: TransportRequestOptionsWithMeta): Promise> export default async function DeleteScriptApi (this: That, params: T.DeleteScriptRequest, options?: TransportRequestOptions): Promise export default async function DeleteScriptApi (this: That, params: T.DeleteScriptRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id'] + const { + path: acceptedPath + } = acceptedParams.delete_script + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} diff --git a/src/api/api/enrich.ts b/src/api/api/enrich.ts index ea301cac5..16ba0ac8e 100644 --- a/src/api/api/enrich.ts +++ b/src/api/api/enrich.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. 
and contributors + * SPDX-License-Identifier: Apache-2.0 */ /* eslint-disable import/export */ @@ -35,23 +21,83 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -interface That { transport: Transport } + +interface That { + transport: Transport + acceptedParams: Record +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] export default class Enrich { transport: Transport + acceptedParams: Record constructor (transport: Transport) { this.transport = transport + this.acceptedParams = { + 'enrich.delete_policy': { + path: [ + 'name' + ], + body: [], + query: [ + 'master_timeout' + ] + }, + 'enrich.execute_policy': { + path: [ + 'name' + ], + body: [], + query: [ + 'master_timeout', + 'wait_for_completion' + ] + }, + 'enrich.get_policy': { + path: [ + 'name' + ], + body: [], + query: [ + 'master_timeout' + ] + }, + 'enrich.put_policy': { + path: [ + 'name' + ], + body: [ + 'geo_match', + 'match', + 'range' + ], + query: [ + 'master_timeout' + ] + }, + 'enrich.stats': { + path: [], + body: [], + query: [ + 'master_timeout' + ] + } + } } /** * Delete an enrich policy. Deletes an existing enrich policy and its enrich index. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-enrich-delete-policy | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-enrich-delete-policy | Elasticsearch API documentation} */ async deletePolicy (this: That, params: T.EnrichDeletePolicyRequest, options?: TransportRequestOptionsWithOutMeta): Promise async deletePolicy (this: That, params: T.EnrichDeletePolicyRequest, options?: TransportRequestOptionsWithMeta): Promise> async deletePolicy (this: That, params: T.EnrichDeletePolicyRequest, options?: TransportRequestOptions): Promise async deletePolicy (this: That, params: T.EnrichDeletePolicyRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] + const { + path: acceptedPath + } = this.acceptedParams['enrich.delete_policy'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -87,13 +133,16 @@ export default class Enrich { /** * Run an enrich policy. Create the enrich index for an existing enrich policy. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-enrich-execute-policy | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-enrich-execute-policy | Elasticsearch API documentation} */ async executePolicy (this: That, params: T.EnrichExecutePolicyRequest, options?: TransportRequestOptionsWithOutMeta): Promise async executePolicy (this: That, params: T.EnrichExecutePolicyRequest, options?: TransportRequestOptionsWithMeta): Promise> async executePolicy (this: That, params: T.EnrichExecutePolicyRequest, options?: TransportRequestOptions): Promise async executePolicy (this: That, params: T.EnrichExecutePolicyRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] + const { + path: acceptedPath + } = this.acceptedParams['enrich.execute_policy'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -129,13 +178,16 @@ export default class Enrich { /** * Get an enrich policy. Returns information about an enrich policy. 
- * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-enrich-get-policy | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-enrich-get-policy | Elasticsearch API documentation} */ async getPolicy (this: That, params?: T.EnrichGetPolicyRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getPolicy (this: That, params?: T.EnrichGetPolicyRequest, options?: TransportRequestOptionsWithMeta): Promise> async getPolicy (this: That, params?: T.EnrichGetPolicyRequest, options?: TransportRequestOptions): Promise async getPolicy (this: That, params?: T.EnrichGetPolicyRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] + const { + path: acceptedPath + } = this.acceptedParams['enrich.get_policy'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -179,14 +231,18 @@ export default class Enrich { /** * Create an enrich policy. Creates an enrich policy. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-enrich-put-policy | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-enrich-put-policy | Elasticsearch API documentation} */ async putPolicy (this: That, params: T.EnrichPutPolicyRequest, options?: TransportRequestOptionsWithOutMeta): Promise async putPolicy (this: That, params: T.EnrichPutPolicyRequest, options?: TransportRequestOptionsWithMeta): Promise> async putPolicy (this: That, params: T.EnrichPutPolicyRequest, options?: TransportRequestOptions): Promise async putPolicy (this: That, params: T.EnrichPutPolicyRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] - const acceptedBody: string[] = ['geo_match', 'match', 'range'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['enrich.put_policy'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -208,8 +264,14 @@ export default class Enrich { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -226,13 +288,16 @@ export default class Enrich { /** * Get enrich stats. Returns enrich coordinator statistics and information about enrich policies that are currently executing. 
- * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-enrich-stats | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-enrich-stats | Elasticsearch API documentation} */ async stats (this: That, params?: T.EnrichStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async stats (this: That, params?: T.EnrichStatsRequest, options?: TransportRequestOptionsWithMeta): Promise> async stats (this: That, params?: T.EnrichStatsRequest, options?: TransportRequestOptions): Promise async stats (this: That, params?: T.EnrichStatsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] + const { + path: acceptedPath + } = this.acceptedParams['enrich.stats'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} diff --git a/src/api/api/eql.ts b/src/api/api/eql.ts index 9f490aca9..129c59265 100644 --- a/src/api/api/eql.ts +++ b/src/api/api/eql.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 */ /* eslint-disable import/export */ @@ -35,23 +21,93 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -interface That { transport: Transport } + +interface That { + transport: Transport + acceptedParams: Record +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] export default class Eql { transport: Transport + acceptedParams: Record constructor (transport: Transport) { this.transport = transport + this.acceptedParams = { + 'eql.delete': { + path: [ + 'id' + ], + body: [], + query: [] + }, + 'eql.get': { + path: [ + 'id' + ], + body: [], + query: [ + 'keep_alive', + 'wait_for_completion_timeout' + ] + }, + 'eql.get_status': { + path: [ + 'id' + ], + body: [], + query: [] + }, + 'eql.search': { + path: [ + 'index' + ], + body: [ + 'query', + 'case_sensitive', + 'event_category_field', + 'tiebreaker_field', + 'timestamp_field', + 'fetch_size', + 'filter', + 'keep_alive', + 'keep_on_completion', + 'wait_for_completion_timeout', + 'allow_partial_search_results', + 'allow_partial_sequence_results', + 'size', + 'fields', + 'result_position', + 'runtime_mappings', + 'max_samples_per_key' + ], + query: [ + 'allow_no_indices', + 'allow_partial_search_results', + 'allow_partial_sequence_results', + 'expand_wildcards', + 'ignore_unavailable', + 'keep_alive', + 'keep_on_completion', + 'wait_for_completion_timeout' + ] + } + } } /** * Delete an async EQL search. Delete an async EQL search or a stored synchronous EQL search. The API also deletes results for the search. 
- * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-eql-delete | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-eql-delete | Elasticsearch API documentation} */ async delete (this: That, params: T.EqlDeleteRequest, options?: TransportRequestOptionsWithOutMeta): Promise async delete (this: That, params: T.EqlDeleteRequest, options?: TransportRequestOptionsWithMeta): Promise> async delete (this: That, params: T.EqlDeleteRequest, options?: TransportRequestOptions): Promise async delete (this: That, params: T.EqlDeleteRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id'] + const { + path: acceptedPath + } = this.acceptedParams['eql.delete'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -87,13 +143,16 @@ export default class Eql { /** * Get async EQL search results. Get the current status and available results for an async EQL search or a stored synchronous EQL search. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-eql-get | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-eql-get | Elasticsearch API documentation} */ async get (this: That, params: T.EqlGetRequest, options?: TransportRequestOptionsWithOutMeta): Promise> async get (this: That, params: T.EqlGetRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>> async get (this: That, params: T.EqlGetRequest, options?: TransportRequestOptions): Promise> async get (this: That, params: T.EqlGetRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id'] + const { + path: acceptedPath + } = this.acceptedParams['eql.get'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -129,13 +188,16 @@ export default class Eql { /** * Get the async EQL status. Get the current status for an async EQL search or a stored synchronous EQL search without returning results. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-eql-get-status | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-eql-get-status | Elasticsearch API documentation} */ async getStatus (this: That, params: T.EqlGetStatusRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getStatus (this: That, params: T.EqlGetStatusRequest, options?: TransportRequestOptionsWithMeta): Promise> async getStatus (this: That, params: T.EqlGetStatusRequest, options?: TransportRequestOptions): Promise async getStatus (this: That, params: T.EqlGetStatusRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id'] + const { + path: acceptedPath + } = this.acceptedParams['eql.get_status'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -171,14 +233,18 @@ export default class Eql { /** * Get EQL search results. Returns search results for an Event Query Language (EQL) query. EQL assumes each document in a data stream or index corresponds to an event. 
- * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-eql-search | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-eql-search | Elasticsearch API documentation} */ async search (this: That, params: T.EqlSearchRequest, options?: TransportRequestOptionsWithOutMeta): Promise> async search (this: That, params: T.EqlSearchRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>> async search (this: That, params: T.EqlSearchRequest, options?: TransportRequestOptions): Promise> async search (this: That, params: T.EqlSearchRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] - const acceptedBody: string[] = ['query', 'case_sensitive', 'event_category_field', 'tiebreaker_field', 'timestamp_field', 'fetch_size', 'filter', 'keep_alive', 'keep_on_completion', 'wait_for_completion_timeout', 'allow_partial_search_results', 'allow_partial_sequence_results', 'size', 'fields', 'result_position', 'runtime_mappings', 'max_samples_per_key'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['eql.search'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -200,8 +266,14 @@ export default class Eql { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } diff --git a/src/api/api/esql.ts b/src/api/api/esql.ts index d76ed6962..09aa54957 100644 --- a/src/api/api/esql.ts +++ b/src/api/api/esql.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. 
and contributors + * SPDX-License-Identifier: Apache-2.0 */ /* eslint-disable import/export */ @@ -35,24 +21,104 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -interface That { transport: Transport } + +interface That { + transport: Transport + acceptedParams: Record +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] export default class Esql { transport: Transport + acceptedParams: Record constructor (transport: Transport) { this.transport = transport + this.acceptedParams = { + 'esql.async_query': { + path: [], + body: [ + 'columnar', + 'filter', + 'locale', + 'params', + 'profile', + 'query', + 'tables', + 'include_ccs_metadata', + 'wait_for_completion_timeout' + ], + query: [ + 'delimiter', + 'drop_null_columns', + 'format', + 'keep_alive', + 'keep_on_completion', + 'wait_for_completion_timeout' + ] + }, + 'esql.async_query_delete': { + path: [ + 'id' + ], + body: [], + query: [] + }, + 'esql.async_query_get': { + path: [ + 'id' + ], + body: [], + query: [ + 'drop_null_columns', + 'keep_alive', + 'wait_for_completion_timeout' + ] + }, + 'esql.async_query_stop': { + path: [ + 'id' + ], + body: [], + query: [ + 'drop_null_columns' + ] + }, + 'esql.query': { + path: [], + body: [ + 'columnar', + 'filter', + 'locale', + 'params', + 'profile', + 'query', + 'tables', + 'include_ccs_metadata' + ], + query: [ + 'format', + 'delimiter', + 'drop_null_columns' + ] + } + } } /** * Run an async ES|QL query. Asynchronously run an ES|QL (Elasticsearch query language) query, monitor its progress, and retrieve results when they become available. The API accepts the same parameters and request body as the synchronous query API, along with additional async related properties. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-esql-async-query | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-esql-async-query | Elasticsearch API documentation} */ async asyncQuery (this: That, params: T.EsqlAsyncQueryRequest, options?: TransportRequestOptionsWithOutMeta): Promise async asyncQuery (this: That, params: T.EsqlAsyncQueryRequest, options?: TransportRequestOptionsWithMeta): Promise> async asyncQuery (this: That, params: T.EsqlAsyncQueryRequest, options?: TransportRequestOptions): Promise async asyncQuery (this: That, params: T.EsqlAsyncQueryRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const acceptedBody: string[] = ['columnar', 'filter', 'locale', 'params', 'profile', 'query', 'tables', 'include_ccs_metadata'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['esql.async_query'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -74,8 +140,14 @@ export default class Esql { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -89,13 +161,16 @@ export default class Esql { /** * Delete an async ES|QL query. If the query is still running, it is cancelled. Otherwise, the stored results are deleted. 
If the Elasticsearch security features are enabled, only the following users can use this API to delete a query: * The authenticated user that submitted the original query request * Users with the `cancel_task` cluster privilege - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-esql-async-query-delete | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-esql-async-query-delete | Elasticsearch API documentation} */ async asyncQueryDelete (this: That, params: T.EsqlAsyncQueryDeleteRequest, options?: TransportRequestOptionsWithOutMeta): Promise async asyncQueryDelete (this: That, params: T.EsqlAsyncQueryDeleteRequest, options?: TransportRequestOptionsWithMeta): Promise> async asyncQueryDelete (this: That, params: T.EsqlAsyncQueryDeleteRequest, options?: TransportRequestOptions): Promise async asyncQueryDelete (this: That, params: T.EsqlAsyncQueryDeleteRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id'] + const { + path: acceptedPath + } = this.acceptedParams['esql.async_query_delete'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -131,13 +206,16 @@ export default class Esql { /** * Get async ES|QL query results. Get the current status and available results or stored results for an ES|QL asynchronous query. If the Elasticsearch security features are enabled, only the user who first submitted the ES|QL query can retrieve the results using this API. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-esql-async-query-get | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-esql-async-query-get | Elasticsearch API documentation} */ async asyncQueryGet (this: That, params: T.EsqlAsyncQueryGetRequest, options?: TransportRequestOptionsWithOutMeta): Promise async asyncQueryGet (this: That, params: T.EsqlAsyncQueryGetRequest, options?: TransportRequestOptionsWithMeta): Promise> async asyncQueryGet (this: That, params: T.EsqlAsyncQueryGetRequest, options?: TransportRequestOptions): Promise async asyncQueryGet (this: That, params: T.EsqlAsyncQueryGetRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id'] + const { + path: acceptedPath + } = this.acceptedParams['esql.async_query_get'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -173,13 +251,16 @@ export default class Esql { /** * Stop async ES|QL query. This API interrupts the query execution and returns the results so far. If the Elasticsearch security features are enabled, only the user who first submitted the ES|QL query can stop it. 
- * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/esql-async-query-stop-api.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-esql-async-query-stop | Elasticsearch API documentation} */ async asyncQueryStop (this: That, params: T.EsqlAsyncQueryStopRequest, options?: TransportRequestOptionsWithOutMeta): Promise async asyncQueryStop (this: That, params: T.EsqlAsyncQueryStopRequest, options?: TransportRequestOptionsWithMeta): Promise> async asyncQueryStop (this: That, params: T.EsqlAsyncQueryStopRequest, options?: TransportRequestOptions): Promise async asyncQueryStop (this: That, params: T.EsqlAsyncQueryStopRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id'] + const { + path: acceptedPath + } = this.acceptedParams['esql.async_query_stop'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -215,14 +296,18 @@ export default class Esql { /** * Run an ES|QL query. Get search results for an ES|QL (Elasticsearch query language) query. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/esql-rest.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/explore-analyze/query-filter/languages/esql-rest | Elasticsearch API documentation} */ async query (this: That, params: T.EsqlQueryRequest, options?: TransportRequestOptionsWithOutMeta): Promise async query (this: That, params: T.EsqlQueryRequest, options?: TransportRequestOptionsWithMeta): Promise> async query (this: That, params: T.EsqlQueryRequest, options?: TransportRequestOptions): Promise async query (this: That, params: T.EsqlQueryRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const acceptedBody: string[] = ['columnar', 'filter', 'locale', 'params', 'profile', 'query', 'tables', 'include_ccs_metadata'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['esql.query'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -244,8 +329,14 @@ export default class Esql { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } diff --git a/src/api/api/exists.ts b/src/api/api/exists.ts index 0c5f99bde..058a3780e 100644 --- a/src/api/api/exists.ts +++ b/src/api/api/exists.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. 
See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 */ /* eslint-disable import/export */ @@ -35,17 +21,45 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -interface That { transport: Transport } + +interface That { + transport: Transport +} + +const acceptedParams: Record = { + exists: { + path: [ + 'id', + 'index' + ], + body: [], + query: [ + 'preference', + 'realtime', + 'refresh', + 'routing', + '_source', + '_source_excludes', + '_source_includes', + 'stored_fields', + 'version', + 'version_type' + ] + } +} /** * Check a document. Verify that a document exists. For example, check to see if a document with the `_id` 0 exists: ``` HEAD my-index-000001/_doc/0 ``` If the document exists, the API returns a status code of `200 - OK`. If the document doesn’t exist, the API returns `404 - Not Found`. **Versioning support** You can use the `version` parameter to check the document only if its current version is equal to the specified one. Internally, Elasticsearch has marked the old document as deleted and added an entirely new document. The old version of the document doesn't disappear immediately, although you won't be able to access it. Elasticsearch cleans up deleted documents in the background as you continue to index more data. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-get | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-get | Elasticsearch API documentation} */ export default async function ExistsApi (this: That, params: T.ExistsRequest, options?: TransportRequestOptionsWithOutMeta): Promise export default async function ExistsApi (this: That, params: T.ExistsRequest, options?: TransportRequestOptionsWithMeta): Promise> export default async function ExistsApi (this: That, params: T.ExistsRequest, options?: TransportRequestOptions): Promise export default async function ExistsApi (this: That, params: T.ExistsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id', 'index'] + const { + path: acceptedPath + } = acceptedParams.exists + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} diff --git a/src/api/api/exists_source.ts b/src/api/api/exists_source.ts index 750302a6f..b1882ab45 100644 --- a/src/api/api/exists_source.ts +++ b/src/api/api/exists_source.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. 
and contributors + * SPDX-License-Identifier: Apache-2.0 */ /* eslint-disable import/export */ @@ -35,17 +21,44 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -interface That { transport: Transport } + +interface That { + transport: Transport +} + +const acceptedParams: Record = { + exists_source: { + path: [ + 'id', + 'index' + ], + body: [], + query: [ + 'preference', + 'realtime', + 'refresh', + 'routing', + '_source', + '_source_excludes', + '_source_includes', + 'version', + 'version_type' + ] + } +} /** * Check for a document source. Check whether a document source exists in an index. For example: ``` HEAD my-index-000001/_source/1 ``` A document's source is not available if it is disabled in the mapping. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-get | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-get | Elasticsearch API documentation} */ export default async function ExistsSourceApi (this: That, params: T.ExistsSourceRequest, options?: TransportRequestOptionsWithOutMeta): Promise export default async function ExistsSourceApi (this: That, params: T.ExistsSourceRequest, options?: TransportRequestOptionsWithMeta): Promise> export default async function ExistsSourceApi (this: That, params: T.ExistsSourceRequest, options?: TransportRequestOptions): Promise export default async function ExistsSourceApi (this: That, params: T.ExistsSourceRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id', 'index'] + const { + path: acceptedPath + } = acceptedParams.exists_source + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} diff --git a/src/api/api/explain.ts b/src/api/api/explain.ts index 16150530b..779edbb88 100644 --- a/src/api/api/explain.ts +++ b/src/api/api/explain.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 */ /* eslint-disable import/export */ @@ -35,18 +21,53 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -interface That { transport: Transport } + +interface That { + transport: Transport +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] + +const acceptedParams: Record = { + explain: { + path: [ + 'id', + 'index' + ], + body: [ + 'query' + ], + query: [ + 'analyzer', + 'analyze_wildcard', + 'default_operator', + 'df', + 'lenient', + 'preference', + 'routing', + '_source', + '_source_excludes', + '_source_includes', + 'stored_fields', + 'q' + ] + } +} /** * Explain a document match result. 
Get information about why a specific document matches, or doesn't match, a query. It computes a score explanation for a query and a specific document. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-explain | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-explain | Elasticsearch API documentation} */ export default async function ExplainApi (this: That, params: T.ExplainRequest, options?: TransportRequestOptionsWithOutMeta): Promise> export default async function ExplainApi (this: That, params: T.ExplainRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>> export default async function ExplainApi (this: That, params: T.ExplainRequest, options?: TransportRequestOptions): Promise> export default async function ExplainApi (this: That, params: T.ExplainRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id', 'index'] - const acceptedBody: string[] = ['query'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = acceptedParams.explain + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -68,8 +89,14 @@ export default async function ExplainApi (this: That, param } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } diff --git a/src/api/api/features.ts b/src/api/api/features.ts index 670d84cda..7e4ffbdb8 100644 --- a/src/api/api/features.ts +++ b/src/api/api/features.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 */ /* eslint-disable import/export */ @@ -35,23 +21,47 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -interface That { transport: Transport } + +interface That { + transport: Transport + acceptedParams: Record +} export default class Features { transport: Transport + acceptedParams: Record constructor (transport: Transport) { this.transport = transport + this.acceptedParams = { + 'features.get_features': { + path: [], + body: [], + query: [ + 'master_timeout' + ] + }, + 'features.reset_features': { + path: [], + body: [], + query: [ + 'master_timeout' + ] + } + } } /** * Get the features. Get a list of features that can be included in snapshots using the `feature_states` field when creating a snapshot. 
You can use this API to determine which feature states to include when taking a snapshot. By default, all feature states are included in a snapshot if that snapshot includes the global state, or none if it does not. A feature state includes one or more system indices necessary for a given feature to function. In order to ensure data integrity, all system indices that comprise a feature state are snapshotted and restored together. The features listed by this API are a combination of built-in features and features defined by plugins. In order for a feature state to be listed in this API and recognized as a valid feature state by the create snapshot API, the plugin that defines that feature must be installed on the master node. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-features-get-features | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-features-get-features | Elasticsearch API documentation} */ async getFeatures (this: That, params?: T.FeaturesGetFeaturesRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getFeatures (this: That, params?: T.FeaturesGetFeaturesRequest, options?: TransportRequestOptionsWithMeta): Promise> async getFeatures (this: That, params?: T.FeaturesGetFeaturesRequest, options?: TransportRequestOptions): Promise async getFeatures (this: That, params?: T.FeaturesGetFeaturesRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] + const { + path: acceptedPath + } = this.acceptedParams['features.get_features'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -85,13 +95,16 @@ export default class Features { /** * Reset the features. Clear all of the state information stored in system indices by Elasticsearch features, including the security and machine learning indices. WARNING: Intended for development and testing use only. Do not reset features on a production cluster. Return a cluster to the same state as a new installation by resetting the feature state for all Elasticsearch features. This deletes all state information stored in system indices. The response code is HTTP 200 if the state is successfully reset for all features. It is HTTP 500 if the reset operation failed for any feature. Note that select features might provide a way to reset particular system indices. Using this API resets all features, both those that are built-in and implemented as plugins. To list the features that will be affected, use the get features API. IMPORTANT: The features installed on the node you submit this request to are the features that will be reset. Run on the master node if you have any doubts about which plugins are installed on individual nodes. 
- * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-features-reset-features | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-features-reset-features | Elasticsearch API documentation} */ async resetFeatures (this: That, params?: T.FeaturesResetFeaturesRequest, options?: TransportRequestOptionsWithOutMeta): Promise async resetFeatures (this: That, params?: T.FeaturesResetFeaturesRequest, options?: TransportRequestOptionsWithMeta): Promise> async resetFeatures (this: That, params?: T.FeaturesResetFeaturesRequest, options?: TransportRequestOptions): Promise async resetFeatures (this: That, params?: T.FeaturesResetFeaturesRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] + const { + path: acceptedPath + } = this.acceptedParams['features.reset_features'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} diff --git a/src/api/api/field_caps.ts b/src/api/api/field_caps.ts index de9d61a0e..667521cee 100644 --- a/src/api/api/field_caps.ts +++ b/src/api/api/field_caps.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 */ /* eslint-disable import/export */ @@ -35,18 +21,50 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -interface That { transport: Transport } + +interface That { + transport: Transport +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] + +const acceptedParams: Record = { + field_caps: { + path: [ + 'index' + ], + body: [ + 'fields', + 'index_filter', + 'runtime_mappings' + ], + query: [ + 'allow_no_indices', + 'expand_wildcards', + 'fields', + 'ignore_unavailable', + 'include_unmapped', + 'filters', + 'types', + 'include_empty_fields' + ] + } +} /** * Get the field capabilities. Get information about the capabilities of fields among multiple indices. For data streams, the API returns field capabilities among the stream’s backing indices. It returns runtime fields like any other field. For example, a runtime field with a type of keyword is returned the same as any other field that belongs to the `keyword` family. 
- * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-field-caps | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-field-caps | Elasticsearch API documentation} */ export default async function FieldCapsApi (this: That, params?: T.FieldCapsRequest, options?: TransportRequestOptionsWithOutMeta): Promise export default async function FieldCapsApi (this: That, params?: T.FieldCapsRequest, options?: TransportRequestOptionsWithMeta): Promise> export default async function FieldCapsApi (this: That, params?: T.FieldCapsRequest, options?: TransportRequestOptions): Promise export default async function FieldCapsApi (this: That, params?: T.FieldCapsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] - const acceptedBody: string[] = ['fields', 'index_filter', 'runtime_mappings'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = acceptedParams.field_caps + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -69,8 +87,14 @@ export default async function FieldCapsApi (this: That, params?: T.FieldCapsRequ } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } diff --git a/src/api/api/fleet.ts b/src/api/api/fleet.ts index 042fcbfd1..49a7cdb4b 100644 --- a/src/api/api/fleet.ts +++ b/src/api/api/fleet.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. 
and contributors + * SPDX-License-Identifier: Apache-2.0 */ /* eslint-disable import/export */ @@ -35,12 +21,159 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -interface That { transport: Transport } + +interface That { + transport: Transport + acceptedParams: Record +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] export default class Fleet { transport: Transport + acceptedParams: Record constructor (transport: Transport) { this.transport = transport + this.acceptedParams = { + 'fleet.delete_secret': { + path: [ + 'id' + ], + body: [], + query: [] + }, + 'fleet.get_secret': { + path: [ + 'id' + ], + body: [], + query: [] + }, + 'fleet.global_checkpoints': { + path: [ + 'index' + ], + body: [], + query: [ + 'wait_for_advance', + 'wait_for_index', + 'checkpoints', + 'timeout' + ] + }, + 'fleet.msearch': { + path: [ + 'index' + ], + body: [ + 'searches' + ], + query: [ + 'allow_no_indices', + 'ccs_minimize_roundtrips', + 'expand_wildcards', + 'ignore_throttled', + 'ignore_unavailable', + 'max_concurrent_searches', + 'max_concurrent_shard_requests', + 'pre_filter_shard_size', + 'search_type', + 'rest_total_hits_as_int', + 'typed_keys', + 'wait_for_checkpoints', + 'allow_partial_search_results' + ] + }, + 'fleet.post_secret': { + path: [], + body: [], + query: [] + }, + 'fleet.search': { + path: [ + 'index' + ], + body: [ + 'aggregations', + 'aggs', + 'collapse', + 'explain', + 'ext', + 'from', + 'highlight', + 'track_total_hits', + 'indices_boost', + 'docvalue_fields', + 'min_score', + 'post_filter', + 'profile', + 'query', + 'rescore', + 'script_fields', + 'search_after', + 'size', + 'slice', + 'sort', + '_source', + 'fields', + 'suggest', + 'terminate_after', + 'timeout', + 'track_scores', + 'version', + 'seq_no_primary_term', + 'stored_fields', + 'pit', + 'runtime_mappings', + 'stats' + ], + query: [ + 'allow_no_indices', + 'analyzer', + 'analyze_wildcard', + 'batched_reduce_size', + 'ccs_minimize_roundtrips', + 'default_operator', + 'df', + 'docvalue_fields', + 'expand_wildcards', + 'explain', + 'ignore_throttled', + 'ignore_unavailable', + 'lenient', + 'max_concurrent_shard_requests', + 'preference', + 'pre_filter_shard_size', + 'request_cache', + 'routing', + 'scroll', + 'search_type', + 'stats', + 'stored_fields', + 'suggest_field', + 'suggest_mode', + 'suggest_size', + 'suggest_text', + 'terminate_after', + 'timeout', + 'track_total_hits', + 'track_scores', + 'typed_keys', + 'rest_total_hits_as_int', + 'version', + '_source', + '_source_excludes', + '_source_includes', + 'seq_no_primary_term', + 'q', + 'size', + 'from', + 'sort', + 'wait_for_checkpoints', + 'allow_partial_search_results' + ] + } + } } /** @@ -50,7 +183,10 @@ export default class Fleet { async deleteSecret (this: That, params?: T.TODO, options?: TransportRequestOptionsWithMeta): Promise> async deleteSecret (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise async deleteSecret (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id'] + const { + path: acceptedPath + } = this.acceptedParams['fleet.delete_secret'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -91,7 +227,10 @@ export default class Fleet { async getSecret (this: That, params?: T.TODO, options?: TransportRequestOptionsWithMeta): Promise> async getSecret (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise async getSecret (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id'] + const { + path: acceptedPath + } = this.acceptedParams['fleet.get_secret'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -127,13 +266,16 @@ export default class Fleet { /** * Get global checkpoints. Get the current global checkpoints for an index. This API is designed for internal use by the Fleet server project. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-fleet | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/group/endpoint-fleet | Elasticsearch API documentation} */ async globalCheckpoints (this: That, params: T.FleetGlobalCheckpointsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async globalCheckpoints (this: That, params: T.FleetGlobalCheckpointsRequest, options?: TransportRequestOptionsWithMeta): Promise> async globalCheckpoints (this: That, params: T.FleetGlobalCheckpointsRequest, options?: TransportRequestOptions): Promise async globalCheckpoints (this: That, params: T.FleetGlobalCheckpointsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] + const { + path: acceptedPath + } = this.acceptedParams['fleet.global_checkpoints'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -169,14 +311,18 @@ export default class Fleet { /** * Run multiple Fleet searches. Run several Fleet searches with a single API request. The API follows the same structure as the multi search API. However, similar to the Fleet search API, it supports the `wait_for_checkpoints` parameter. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-fleet-msearch | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-fleet-msearch | Elasticsearch API documentation} */ async msearch (this: That, params: T.FleetMsearchRequest, options?: TransportRequestOptionsWithOutMeta): Promise> async msearch (this: That, params: T.FleetMsearchRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>> async msearch (this: That, params: T.FleetMsearchRequest, options?: TransportRequestOptions): Promise> async msearch (this: That, params: T.FleetMsearchRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] - const acceptedBody: string[] = ['searches'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['fleet.msearch'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -188,8 +334,14 @@ export default class Fleet { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? 
{} + // @ts-expect-error + body[key] = params[key] + } } } @@ -218,7 +370,10 @@ export default class Fleet { async postSecret (this: That, params?: T.TODO, options?: TransportRequestOptionsWithMeta): Promise> async postSecret (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise async postSecret (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] + const { + path: acceptedPath + } = this.acceptedParams['fleet.post_secret'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -251,14 +406,18 @@ export default class Fleet { /** * Run a Fleet search. The purpose of the Fleet search API is to provide an API where the search will be run only after the provided checkpoint has been processed and is visible for searches inside of Elasticsearch. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-fleet-search | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-fleet-search | Elasticsearch API documentation} */ async search (this: That, params: T.FleetSearchRequest, options?: TransportRequestOptionsWithOutMeta): Promise> async search (this: That, params: T.FleetSearchRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>> async search (this: That, params: T.FleetSearchRequest, options?: TransportRequestOptions): Promise> async search (this: That, params: T.FleetSearchRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] - const acceptedBody: string[] = ['aggregations', 'aggs', 'collapse', 'explain', 'ext', 'from', 'highlight', 'track_total_hits', 'indices_boost', 'docvalue_fields', 'min_score', 'post_filter', 'profile', 'query', 'rescore', 'script_fields', 'search_after', 'size', 'slice', 'sort', '_source', 'fields', 'suggest', 'terminate_after', 'timeout', 'track_scores', 'version', 'seq_no_primary_term', 'stored_fields', 'pit', 'runtime_mappings', 'stats'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['fleet.search'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -280,8 +439,14 @@ export default class Fleet { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } diff --git a/src/api/api/get.ts b/src/api/api/get.ts index 3cb82914a..aa88afd23 100644 --- a/src/api/api/get.ts +++ b/src/api/api/get.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 */ /* eslint-disable import/export */ @@ -35,17 +21,46 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -interface That { transport: Transport } + +interface That { + transport: Transport +} + +const acceptedParams: Record = { + get: { + path: [ + 'id', + 'index' + ], + body: [], + query: [ + 'force_synthetic_source', + 'preference', + 'realtime', + 'refresh', + 'routing', + '_source', + '_source_excludes', + '_source_includes', + 'stored_fields', + 'version', + 'version_type' + ] + } +} /** * Get a document by its ID. Get a document and its source or stored fields from an index. By default, this API is realtime and is not affected by the refresh rate of the index (when data will become visible for search). In the case where stored fields are requested with the `stored_fields` parameter and the document has been updated but is not yet refreshed, the API will have to parse and analyze the source to extract the stored fields. To turn off realtime behavior, set the `realtime` parameter to false. **Source filtering** By default, the API returns the contents of the `_source` field unless you have used the `stored_fields` parameter or the `_source` field is turned off. You can turn off `_source` retrieval by using the `_source` parameter: ``` GET my-index-000001/_doc/0?_source=false ``` If you only need one or two fields from the `_source`, use the `_source_includes` or `_source_excludes` parameters to include or filter out particular fields. This can be helpful with large documents where partial retrieval can save on network overhead Both parameters take a comma separated list of fields or wildcard expressions. For example: ``` GET my-index-000001/_doc/0?_source_includes=*.id&_source_excludes=entities ``` If you only want to specify includes, you can use a shorter notation: ``` GET my-index-000001/_doc/0?_source=*.id ``` **Routing** If routing is used during indexing, the routing value also needs to be specified to retrieve a document. For example: ``` GET my-index-000001/_doc/2?routing=user1 ``` This request gets the document with ID 2, but it is routed based on the user. The document is not fetched if the correct routing is not specified. **Distributed** The GET operation is hashed into a specific shard ID. It is then redirected to one of the replicas within that shard ID and returns the result. The replicas are the primary shard and its replicas within that shard ID group. This means that the more replicas you have, the better your GET scaling will be. **Versioning support** You can use the `version` parameter to retrieve the document only if its current version is equal to the specified one. Internally, Elasticsearch has marked the old document as deleted and added an entirely new document. The old version of the document doesn't disappear immediately, although you won't be able to access it. Elasticsearch cleans up deleted documents in the background as you continue to index more data. 
- * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-get | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-get | Elasticsearch API documentation} */ export default async function GetApi (this: That, params: T.GetRequest, options?: TransportRequestOptionsWithOutMeta): Promise> export default async function GetApi (this: That, params: T.GetRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>> export default async function GetApi (this: That, params: T.GetRequest, options?: TransportRequestOptions): Promise> export default async function GetApi (this: That, params: T.GetRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id', 'index'] + const { + path: acceptedPath + } = acceptedParams.get + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} diff --git a/src/api/api/get_script.ts b/src/api/api/get_script.ts index d079ba650..287694080 100644 --- a/src/api/api/get_script.ts +++ b/src/api/api/get_script.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 */ /* eslint-disable import/export */ @@ -35,17 +21,35 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -interface That { transport: Transport } + +interface That { + transport: Transport +} + +const acceptedParams: Record = { + get_script: { + path: [ + 'id' + ], + body: [], + query: [ + 'master_timeout' + ] + } +} /** * Get a script or search template. Retrieves a stored script or search template. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-get-script | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-get-script | Elasticsearch API documentation} */ export default async function GetScriptApi (this: That, params: T.GetScriptRequest, options?: TransportRequestOptionsWithOutMeta): Promise export default async function GetScriptApi (this: That, params: T.GetScriptRequest, options?: TransportRequestOptionsWithMeta): Promise> export default async function GetScriptApi (this: That, params: T.GetScriptRequest, options?: TransportRequestOptions): Promise export default async function GetScriptApi (this: That, params: T.GetScriptRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id'] + const { + path: acceptedPath + } = acceptedParams.get_script + const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} diff --git a/src/api/api/get_script_context.ts b/src/api/api/get_script_context.ts index b263ed089..88876cdfb 100644 --- a/src/api/api/get_script_context.ts +++ b/src/api/api/get_script_context.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 */ /* eslint-disable import/export */ @@ -35,17 +21,31 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -interface That { transport: Transport } + +interface That { + transport: Transport +} + +const acceptedParams: Record = { + get_script_context: { + path: [], + body: [], + query: [] + } +} /** * Get script contexts. Get a list of supported script contexts and their methods. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-get-script-context | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-get-script-context | Elasticsearch API documentation} */ export default async function GetScriptContextApi (this: That, params?: T.GetScriptContextRequest, options?: TransportRequestOptionsWithOutMeta): Promise export default async function GetScriptContextApi (this: That, params?: T.GetScriptContextRequest, options?: TransportRequestOptionsWithMeta): Promise> export default async function GetScriptContextApi (this: That, params?: T.GetScriptContextRequest, options?: TransportRequestOptions): Promise export default async function GetScriptContextApi (this: That, params?: T.GetScriptContextRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] + const { + path: acceptedPath + } = acceptedParams.get_script_context + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} diff --git a/src/api/api/get_script_languages.ts b/src/api/api/get_script_languages.ts index 7b52735c4..970244d88 100644 --- a/src/api/api/get_script_languages.ts +++ b/src/api/api/get_script_languages.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 */ /* eslint-disable import/export */ @@ -35,17 +21,31 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -interface That { transport: Transport } + +interface That { + transport: Transport +} + +const acceptedParams: Record = { + get_script_languages: { + path: [], + body: [], + query: [] + } +} /** * Get script languages. Get a list of available script types, languages, and contexts. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-get-script-languages | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-get-script-languages | Elasticsearch API documentation} */ export default async function GetScriptLanguagesApi (this: That, params?: T.GetScriptLanguagesRequest, options?: TransportRequestOptionsWithOutMeta): Promise export default async function GetScriptLanguagesApi (this: That, params?: T.GetScriptLanguagesRequest, options?: TransportRequestOptionsWithMeta): Promise> export default async function GetScriptLanguagesApi (this: That, params?: T.GetScriptLanguagesRequest, options?: TransportRequestOptions): Promise export default async function GetScriptLanguagesApi (this: That, params?: T.GetScriptLanguagesRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] + const { + path: acceptedPath + } = acceptedParams.get_script_languages + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} diff --git a/src/api/api/get_source.ts b/src/api/api/get_source.ts index a4eef8c97..759be1ad4 100644 --- a/src/api/api/get_source.ts +++ b/src/api/api/get_source.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. 
and contributors + * SPDX-License-Identifier: Apache-2.0 */ /* eslint-disable import/export */ @@ -35,17 +21,45 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -interface That { transport: Transport } + +interface That { + transport: Transport +} + +const acceptedParams: Record = { + get_source: { + path: [ + 'id', + 'index' + ], + body: [], + query: [ + 'preference', + 'realtime', + 'refresh', + 'routing', + '_source', + '_source_excludes', + '_source_includes', + 'stored_fields', + 'version', + 'version_type' + ] + } +} /** * Get a document's source. Get the source of a document. For example: ``` GET my-index-000001/_source/1 ``` You can use the source filtering parameters to control which parts of the `_source` are returned: ``` GET my-index-000001/_source/1/?_source_includes=*.id&_source_excludes=entities ``` - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-get | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-get | Elasticsearch API documentation} */ export default async function GetSourceApi (this: That, params: T.GetSourceRequest, options?: TransportRequestOptionsWithOutMeta): Promise> export default async function GetSourceApi (this: That, params: T.GetSourceRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>> export default async function GetSourceApi (this: That, params: T.GetSourceRequest, options?: TransportRequestOptions): Promise> export default async function GetSourceApi (this: That, params: T.GetSourceRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id', 'index'] + const { + path: acceptedPath + } = acceptedParams.get_source + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} diff --git a/src/api/api/graph.ts b/src/api/api/graph.ts index 33534fe4a..fb0521d60 100644 --- a/src/api/api/graph.ts +++ b/src/api/api/graph.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. 
and contributors + * SPDX-License-Identifier: Apache-2.0 */ /* eslint-disable import/export */ @@ -35,24 +21,52 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -interface That { transport: Transport } + +interface That { + transport: Transport + acceptedParams: Record +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] export default class Graph { transport: Transport + acceptedParams: Record constructor (transport: Transport) { this.transport = transport + this.acceptedParams = { + 'graph.explore': { + path: [ + 'index' + ], + body: [ + 'connections', + 'controls', + 'query', + 'vertices' + ], + query: [ + 'routing', + 'timeout' + ] + } + } } /** * Explore graph analytics. Extract and summarize information about the documents and terms in an Elasticsearch data stream or index. The easiest way to understand the behavior of this API is to use the Graph UI to explore connections. An initial request to the `_explore` API contains a seed query that identifies the documents of interest and specifies the fields that define the vertices and connections you want to include in the graph. Subsequent requests enable you to spider out from one more vertices of interest. You can exclude vertices that have already been returned. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-graph | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/group/endpoint-graph | Elasticsearch API documentation} */ async explore (this: That, params: T.GraphExploreRequest, options?: TransportRequestOptionsWithOutMeta): Promise async explore (this: That, params: T.GraphExploreRequest, options?: TransportRequestOptionsWithMeta): Promise> async explore (this: That, params: T.GraphExploreRequest, options?: TransportRequestOptions): Promise async explore (this: That, params: T.GraphExploreRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] - const acceptedBody: string[] = ['connections', 'controls', 'query', 'vertices'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['graph.explore'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -74,8 +88,14 @@ export default class Graph { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } diff --git a/src/api/api/health_report.ts b/src/api/api/health_report.ts index 51a48a265..300df315b 100644 --- a/src/api/api/health_report.ts +++ b/src/api/api/health_report.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 */ /* eslint-disable import/export */ @@ -35,17 +21,37 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -interface That { transport: Transport } + +interface That { + transport: Transport +} + +const acceptedParams: Record = { + health_report: { + path: [ + 'feature' + ], + body: [], + query: [ + 'timeout', + 'verbose', + 'size' + ] + } +} /** * Get the cluster health. Get a report with the health status of an Elasticsearch cluster. The report contains a list of indicators that compose Elasticsearch functionality. Each indicator has a health status of: green, unknown, yellow or red. The indicator will provide an explanation and metadata describing the reason for its current health status. The cluster’s status is controlled by the worst indicator status. In the event that an indicator’s status is non-green, a list of impacts may be present in the indicator result which detail the functionalities that are negatively affected by the health issue. Each impact carries with it a severity level, an area of the system that is affected, and a simple description of the impact on the system. Some health indicators can determine the root cause of a health problem and prescribe a set of steps that can be performed in order to improve the health of the system. The root cause and remediation steps are encapsulated in a diagnosis. A diagnosis contains a cause detailing a root cause analysis, an action containing a brief description of the steps to take to fix the problem, the list of affected resources (if applicable), and a detailed step-by-step troubleshooting guide to fix the diagnosed problem. NOTE: The health indicators perform root cause analysis of non-green health statuses. This can be computationally expensive when called frequently. When setting up automated polling of the API for health status, set verbose to false to disable the more expensive analysis logic. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-health-report | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-health-report | Elasticsearch API documentation} */ export default async function HealthReportApi (this: That, params?: T.HealthReportRequest, options?: TransportRequestOptionsWithOutMeta): Promise export default async function HealthReportApi (this: That, params?: T.HealthReportRequest, options?: TransportRequestOptionsWithMeta): Promise> export default async function HealthReportApi (this: That, params?: T.HealthReportRequest, options?: TransportRequestOptions): Promise export default async function HealthReportApi (this: That, params?: T.HealthReportRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['feature'] + const { + path: acceptedPath + } = acceptedParams.health_report + const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} diff --git a/src/api/api/ilm.ts b/src/api/api/ilm.ts index 1c097071c..69eea6a6a 100644 --- a/src/api/api/ilm.ts +++ b/src/api/api/ilm.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 */ /* eslint-disable import/export */ @@ -35,23 +21,134 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -interface That { transport: Transport } + +interface That { + transport: Transport + acceptedParams: Record +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] export default class Ilm { transport: Transport + acceptedParams: Record constructor (transport: Transport) { this.transport = transport + this.acceptedParams = { + 'ilm.delete_lifecycle': { + path: [ + 'name' + ], + body: [], + query: [ + 'master_timeout', + 'timeout' + ] + }, + 'ilm.explain_lifecycle': { + path: [ + 'index' + ], + body: [], + query: [ + 'only_errors', + 'only_managed', + 'master_timeout' + ] + }, + 'ilm.get_lifecycle': { + path: [ + 'name' + ], + body: [], + query: [ + 'master_timeout', + 'timeout' + ] + }, + 'ilm.get_status': { + path: [], + body: [], + query: [] + }, + 'ilm.migrate_to_data_tiers': { + path: [], + body: [ + 'legacy_template_to_delete', + 'node_attribute' + ], + query: [ + 'dry_run', + 'master_timeout' + ] + }, + 'ilm.move_to_step': { + path: [ + 'index' + ], + body: [ + 'current_step', + 'next_step' + ], + query: [] + }, + 'ilm.put_lifecycle': { + path: [ + 'name' + ], + body: [ + 'policy' + ], + query: [ + 'master_timeout', + 'timeout' + ] + }, + 'ilm.remove_policy': { + path: [ + 'index' + ], + body: [], + query: [] + }, + 'ilm.retry': { + path: [ + 'index' + ], + body: [], + query: [] + }, + 'ilm.start': { + path: [], + body: [], + query: [ + 'master_timeout', + 'timeout' + ] + }, + 'ilm.stop': { + path: [], + body: [], + query: [ + 'master_timeout', + 'timeout' + ] + } + } } /** * Delete a lifecycle policy. You cannot delete policies that are currently in use. If the policy is being used to manage any indices, the request fails and returns an error. 
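// --- Editor's sketch (not part of this patch): how the acceptedParams tables are used ---
// A simplified, standalone restatement of the key-routing rule the generated methods
// above now share: keys listed under `body` go to the request body, path keys and the
// reserved `body`/`querystring` keys are skipped, keys listed under `query` (or the
// common query params) go to the query string, and any remaining unknown key now falls
// through to the body instead of the query string. `splitParams` is a hypothetical
// helper name used only for illustration; the real logic is inlined per method.
const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty']

interface AcceptedParams { path: string[], body: string[], query: string[] }

function splitParams (params: Record<string, any>, accepted: AcceptedParams): {
  querystring: Record<string, any>
  body: Record<string, any> | undefined
} {
  const querystring: Record<string, any> = { ...(params.querystring ?? {}) }
  let body: Record<string, any> | undefined = params.body
  for (const key of Object.keys(params)) {
    if (accepted.body.includes(key)) {
      body = body ?? {}
      body[key] = params[key]
    } else if (accepted.path.includes(key) || key === 'body' || key === 'querystring') {
      continue
    } else if (accepted.query.includes(key) || commonQueryParams.includes(key)) {
      querystring[key] = params[key]
    } else {
      // Unknown keys now default to the body rather than the query string.
      body = body ?? {}
      body[key] = params[key]
    }
  }
  return { querystring, body }
}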
- * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-delete-lifecycle | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-ilm-delete-lifecycle | Elasticsearch API documentation} */ async deleteLifecycle (this: That, params: T.IlmDeleteLifecycleRequest, options?: TransportRequestOptionsWithOutMeta): Promise async deleteLifecycle (this: That, params: T.IlmDeleteLifecycleRequest, options?: TransportRequestOptionsWithMeta): Promise> async deleteLifecycle (this: That, params: T.IlmDeleteLifecycleRequest, options?: TransportRequestOptions): Promise async deleteLifecycle (this: That, params: T.IlmDeleteLifecycleRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] + const { + path: acceptedPath + } = this.acceptedParams['ilm.delete_lifecycle'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -87,13 +184,16 @@ export default class Ilm { /** * Explain the lifecycle state. Get the current lifecycle status for one or more indices. For data streams, the API retrieves the current lifecycle status for the stream's backing indices. The response indicates when the index entered each lifecycle state, provides the definition of the running phase, and information about any failures. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-explain-lifecycle | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-ilm-explain-lifecycle | Elasticsearch API documentation} */ async explainLifecycle (this: That, params: T.IlmExplainLifecycleRequest, options?: TransportRequestOptionsWithOutMeta): Promise async explainLifecycle (this: That, params: T.IlmExplainLifecycleRequest, options?: TransportRequestOptionsWithMeta): Promise> async explainLifecycle (this: That, params: T.IlmExplainLifecycleRequest, options?: TransportRequestOptions): Promise async explainLifecycle (this: That, params: T.IlmExplainLifecycleRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] + const { + path: acceptedPath + } = this.acceptedParams['ilm.explain_lifecycle'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -129,13 +229,16 @@ export default class Ilm { /** * Get lifecycle policies. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-get-lifecycle | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-ilm-get-lifecycle | Elasticsearch API documentation} */ async getLifecycle (this: That, params?: T.IlmGetLifecycleRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getLifecycle (this: That, params?: T.IlmGetLifecycleRequest, options?: TransportRequestOptionsWithMeta): Promise> async getLifecycle (this: That, params?: T.IlmGetLifecycleRequest, options?: TransportRequestOptions): Promise async getLifecycle (this: That, params?: T.IlmGetLifecycleRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] + const { + path: acceptedPath + } = this.acceptedParams['ilm.get_lifecycle'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -179,13 +282,16 @@ export default class Ilm { /** * Get the ILM status. 
Get the current index lifecycle management status. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-get-status | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-ilm-get-status | Elasticsearch API documentation} */ async getStatus (this: That, params?: T.IlmGetStatusRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getStatus (this: That, params?: T.IlmGetStatusRequest, options?: TransportRequestOptionsWithMeta): Promise> async getStatus (this: That, params?: T.IlmGetStatusRequest, options?: TransportRequestOptions): Promise async getStatus (this: That, params?: T.IlmGetStatusRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] + const { + path: acceptedPath + } = this.acceptedParams['ilm.get_status'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -219,14 +325,18 @@ export default class Ilm { /** * Migrate to data tiers routing. Switch the indices, ILM policies, and legacy, composable, and component templates from using custom node attributes and attribute-based allocation filters to using data tiers. Optionally, delete one legacy index template. Using node roles enables ILM to automatically move the indices between data tiers. Migrating away from custom node attributes routing can be manually performed. This API provides an automated way of performing three out of the four manual steps listed in the migration guide: 1. Stop setting the custom hot attribute on new indices. 1. Remove custom allocation settings from existing ILM policies. 1. Replace custom allocation settings from existing indices with the corresponding tier preference. ILM must be stopped before performing the migration. Use the stop ILM and get ILM status APIs to wait until the reported operation mode is `STOPPED`. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-migrate-to-data-tiers | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-ilm-migrate-to-data-tiers | Elasticsearch API documentation} */ async migrateToDataTiers (this: That, params?: T.IlmMigrateToDataTiersRequest, options?: TransportRequestOptionsWithOutMeta): Promise async migrateToDataTiers (this: That, params?: T.IlmMigrateToDataTiersRequest, options?: TransportRequestOptionsWithMeta): Promise> async migrateToDataTiers (this: That, params?: T.IlmMigrateToDataTiersRequest, options?: TransportRequestOptions): Promise async migrateToDataTiers (this: That, params?: T.IlmMigrateToDataTiersRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const acceptedBody: string[] = ['legacy_template_to_delete', 'node_attribute'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['ilm.migrate_to_data_tiers'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -249,8 +359,14 @@ export default class Ilm { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? 
{} + // @ts-expect-error + body[key] = params[key] + } } } @@ -264,14 +380,18 @@ export default class Ilm { /** * Move to a lifecycle step. Manually move an index into a specific step in the lifecycle policy and run that step. WARNING: This operation can result in the loss of data. Manually moving an index into a specific step runs that step even if it has already been performed. This is a potentially destructive action and this should be considered an expert level API. You must specify both the current step and the step to be executed in the body of the request. The request will fail if the current step does not match the step currently running for the index This is to prevent the index from being moved from an unexpected step into the next step. When specifying the target (`next_step`) to which the index will be moved, either the name or both the action and name fields are optional. If only the phase is specified, the index will move to the first step of the first action in the target phase. If the phase and action are specified, the index will move to the first step of the specified action in the specified phase. Only actions specified in the ILM policy are considered valid. An index cannot move to a step that is not part of its policy. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-move-to-step | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-ilm-move-to-step | Elasticsearch API documentation} */ async moveToStep (this: That, params: T.IlmMoveToStepRequest, options?: TransportRequestOptionsWithOutMeta): Promise async moveToStep (this: That, params: T.IlmMoveToStepRequest, options?: TransportRequestOptionsWithMeta): Promise> async moveToStep (this: That, params: T.IlmMoveToStepRequest, options?: TransportRequestOptions): Promise async moveToStep (this: That, params: T.IlmMoveToStepRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] - const acceptedBody: string[] = ['current_step', 'next_step'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['ilm.move_to_step'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -293,8 +413,14 @@ export default class Ilm { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -311,14 +437,18 @@ export default class Ilm { /** * Create or update a lifecycle policy. If the specified policy exists, it is replaced and the policy version is incremented. NOTE: Only the latest version of the policy is stored, you cannot revert to previous versions. 
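// --- Editor's illustrative sketch (not part of this patch) ---
// Per the 'ilm.put_lifecycle' entry above, `name` is a path parameter and `policy`
// is sent in the request body. The policy name and phase settings are placeholder
// values for illustration only.
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

async function createOrUpdatePolicy (): Promise<void> {
  await client.ilm.putLifecycle({
    name: 'logs-rollover-policy',
    policy: {
      phases: {
        hot: { actions: { rollover: { max_age: '7d' } } },
        delete: { min_age: '30d', actions: { delete: {} } }
      }
    }
  })
}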
- * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-put-lifecycle | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-ilm-put-lifecycle | Elasticsearch API documentation} */ async putLifecycle (this: That, params: T.IlmPutLifecycleRequest, options?: TransportRequestOptionsWithOutMeta): Promise async putLifecycle (this: That, params: T.IlmPutLifecycleRequest, options?: TransportRequestOptionsWithMeta): Promise> async putLifecycle (this: That, params: T.IlmPutLifecycleRequest, options?: TransportRequestOptions): Promise async putLifecycle (this: That, params: T.IlmPutLifecycleRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] - const acceptedBody: string[] = ['policy'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['ilm.put_lifecycle'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -340,8 +470,14 @@ export default class Ilm { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -358,13 +494,16 @@ export default class Ilm { /** * Remove policies from an index. Remove the assigned lifecycle policies from an index or a data stream's backing indices. It also stops managing the indices. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-remove-policy | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-ilm-remove-policy | Elasticsearch API documentation} */ async removePolicy (this: That, params: T.IlmRemovePolicyRequest, options?: TransportRequestOptionsWithOutMeta): Promise async removePolicy (this: That, params: T.IlmRemovePolicyRequest, options?: TransportRequestOptionsWithMeta): Promise> async removePolicy (this: That, params: T.IlmRemovePolicyRequest, options?: TransportRequestOptions): Promise async removePolicy (this: That, params: T.IlmRemovePolicyRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] + const { + path: acceptedPath + } = this.acceptedParams['ilm.remove_policy'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -400,13 +539,16 @@ export default class Ilm { /** * Retry a policy. Retry running the lifecycle policy for an index that is in the ERROR step. The API sets the policy back to the step where the error occurred and runs the step. Use the explain lifecycle state API to determine whether an index is in the ERROR step. 
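// --- Editor's illustrative sketch (not part of this patch) ---
// Pairs the explain-lifecycle and retry calls as the description above suggests:
// check whether an index is stuck in the ERROR step, then retry its policy.
// The index name is a placeholder and the explain response is inspected loosely
// (typed as `any`) rather than via the exact generated types.
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

async function retryIfFailed (index: string): Promise<void> {
  const explain = await client.ilm.explainLifecycle({ index })
  const state: any = explain.indices[index]
  // A failed step is reported only when the index is in the ERROR step.
  if (state?.failed_step != null) {
    await client.ilm.retry({ index })
  }
}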
- * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-retry | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-ilm-retry | Elasticsearch API documentation} */ async retry (this: That, params: T.IlmRetryRequest, options?: TransportRequestOptionsWithOutMeta): Promise async retry (this: That, params: T.IlmRetryRequest, options?: TransportRequestOptionsWithMeta): Promise> async retry (this: That, params: T.IlmRetryRequest, options?: TransportRequestOptions): Promise async retry (this: That, params: T.IlmRetryRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] + const { + path: acceptedPath + } = this.acceptedParams['ilm.retry'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -442,13 +584,16 @@ export default class Ilm { /** * Start the ILM plugin. Start the index lifecycle management plugin if it is currently stopped. ILM is started automatically when the cluster is formed. Restarting ILM is necessary only when it has been stopped using the stop ILM API. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-start | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-ilm-start | Elasticsearch API documentation} */ async start (this: That, params?: T.IlmStartRequest, options?: TransportRequestOptionsWithOutMeta): Promise async start (this: That, params?: T.IlmStartRequest, options?: TransportRequestOptionsWithMeta): Promise> async start (this: That, params?: T.IlmStartRequest, options?: TransportRequestOptions): Promise async start (this: That, params?: T.IlmStartRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] + const { + path: acceptedPath + } = this.acceptedParams['ilm.start'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -482,13 +627,16 @@ export default class Ilm { /** * Stop the ILM plugin. Halt all lifecycle management operations and stop the index lifecycle management plugin. This is useful when you are performing maintenance on the cluster and need to prevent ILM from performing any actions on your indices. The API returns as soon as the stop request has been acknowledged, but the plugin might continue to run until in-progress operations complete and the plugin can be safely stopped. Use the get ILM status API to check whether ILM is running. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ilm-stop | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-ilm-stop | Elasticsearch API documentation} */ async stop (this: That, params?: T.IlmStopRequest, options?: TransportRequestOptionsWithOutMeta): Promise async stop (this: That, params?: T.IlmStopRequest, options?: TransportRequestOptionsWithMeta): Promise> async stop (this: That, params?: T.IlmStopRequest, options?: TransportRequestOptions): Promise async stop (this: That, params?: T.IlmStopRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] + const { + path: acceptedPath + } = this.acceptedParams['ilm.stop'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} diff --git a/src/api/api/index.ts b/src/api/api/index.ts index bcd3842eb..34ea4148d 100644 --- a/src/api/api/index.ts +++ b/src/api/api/index.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 */ /* eslint-disable import/export */ @@ -35,18 +21,53 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -interface That { transport: Transport } + +interface That { + transport: Transport +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] + +const acceptedParams: Record = { + index: { + path: [ + 'id', + 'index' + ], + body: [ + 'document' + ], + query: [ + 'if_primary_term', + 'if_seq_no', + 'include_source_on_error', + 'op_type', + 'pipeline', + 'refresh', + 'routing', + 'timeout', + 'version', + 'version_type', + 'wait_for_active_shards', + 'require_alias' + ] + } +} /** * Create or update a document in an index. Add a JSON document to the specified data stream or index and make it searchable. If the target is an index and the document already exists, the request updates the document and increments its version. NOTE: You cannot use this API to send update requests for existing documents in a data stream. If the Elasticsearch security features are enabled, you must have the following index privileges for the target data stream, index, or index alias: * To add or overwrite a document using the `PUT //_doc/<_id>` request format, you must have the `create`, `index`, or `write` index privilege. * To add a document using the `POST //_doc/` request format, you must have the `create_doc`, `create`, `index`, or `write` index privilege. * To automatically create a data stream or index with this API request, you must have the `auto_configure`, `create_index`, or `manage` index privilege. Automatic data stream creation requires a matching index template with data stream enabled. NOTE: Replica shards might not all be started when an indexing operation returns successfully. By default, only the primary is required. Set `wait_for_active_shards` to change this default behavior. **Automatically create data streams and indices** If the request's target doesn't exist and matches an index template with a `data_stream` definition, the index operation automatically creates the data stream. If the target doesn't exist and doesn't match a data stream template, the operation automatically creates the index and applies any matching index templates. NOTE: Elasticsearch includes several built-in index templates. To avoid naming collisions with these templates, refer to index pattern documentation. If no mapping exists, the index operation creates a dynamic mapping. 
By default, new fields and objects are automatically added to the mapping if needed. Automatic index creation is controlled by the `action.auto_create_index` setting. If it is `true`, any index can be created automatically. You can modify this setting to explicitly allow or block automatic creation of indices that match specified patterns or set it to `false` to turn off automatic index creation entirely. Specify a comma-separated list of patterns you want to allow or prefix each pattern with `+` or `-` to indicate whether it should be allowed or blocked. When a list is specified, the default behaviour is to disallow. NOTE: The `action.auto_create_index` setting affects the automatic creation of indices only. It does not affect the creation of data streams. **Optimistic concurrency control** Index operations can be made conditional and only be performed if the last modification to the document was assigned the sequence number and primary term specified by the `if_seq_no` and `if_primary_term` parameters. If a mismatch is detected, the operation will result in a `VersionConflictException` and a status code of `409`. **Routing** By default, shard placement — or routing — is controlled by using a hash of the document's ID value. For more explicit control, the value fed into the hash function used by the router can be directly specified on a per-operation basis using the `routing` parameter. When setting up explicit mapping, you can also use the `_routing` field to direct the index operation to extract the routing value from the document itself. This does come at the (very minimal) cost of an additional document parsing pass. If the `_routing` mapping is defined and set to be required, the index operation will fail if no routing value is provided or extracted. NOTE: Data streams do not support custom routing unless they were created with the `allow_custom_routing` setting enabled in the template. **Distributed** The index operation is directed to the primary shard based on its route and performed on the actual node containing this shard. After the primary shard completes the operation, if needed, the update is distributed to applicable replicas. **Active shards** To improve the resiliency of writes to the system, indexing operations can be configured to wait for a certain number of active shard copies before proceeding with the operation. If the requisite number of active shard copies are not available, then the write operation must wait and retry, until either the requisite shard copies have started or a timeout occurs. By default, write operations only wait for the primary shards to be active before proceeding (that is to say `wait_for_active_shards` is `1`). This default can be overridden in the index settings dynamically by setting `index.write.wait_for_active_shards`. To alter this behavior per operation, use the `wait_for_active_shards request` parameter. Valid values are all or any positive integer up to the total number of configured copies per shard in the index (which is `number_of_replicas`+1). Specifying a negative value or a number greater than the number of shard copies will throw an error. For example, suppose you have a cluster of three nodes, A, B, and C and you create an index index with the number of replicas set to 3 (resulting in 4 shard copies, one more copy than there are nodes). If you attempt an indexing operation, by default the operation will only ensure the primary copy of each shard is available before proceeding. 
This means that even if B and C went down and A hosted the primary shard copies, the indexing operation would still proceed with only one copy of the data. If `wait_for_active_shards` is set on the request to `3` (and all three nodes are up), the indexing operation will require 3 active shard copies before proceeding. This requirement should be met because there are 3 active nodes in the cluster, each one holding a copy of the shard. However, if you set `wait_for_active_shards` to `all` (or to `4`, which is the same in this situation), the indexing operation will not proceed as you do not have all 4 copies of each shard active in the index. The operation will timeout unless a new node is brought up in the cluster to host the fourth copy of the shard. It is important to note that this setting greatly reduces the chances of the write operation not writing to the requisite number of shard copies, but it does not completely eliminate the possibility, because this check occurs before the write operation starts. After the write operation is underway, it is still possible for replication to fail on any number of shard copies but still succeed on the primary. The `_shards` section of the API response reveals the number of shard copies on which replication succeeded and failed. **No operation (noop) updates** When updating a document by using this API, a new version of the document is always created even if the document hasn't changed. If this isn't acceptable use the `_update` API with `detect_noop` set to `true`. The `detect_noop` option isn't available on this API because it doesn’t fetch the old source and isn't able to compare it against the new source. There isn't a definitive rule for when noop updates aren't acceptable. It's a combination of lots of factors like how frequently your data source sends updates that are actually noops and how many queries per second Elasticsearch runs on the shard receiving the updates. **Versioning** Each indexed document is given a version number. By default, internal versioning is used that starts at 1 and increments with each update, deletes included. Optionally, the version number can be set to an external value (for example, if maintained in a database). To enable this functionality, `version_type` should be set to `external`. The value provided must be a numeric, long value greater than or equal to 0, and less than around `9.2e+18`. NOTE: Versioning is completely real time, and is not affected by the near real time aspects of search operations. If no version is provided, the operation runs without any version checks. When using the external version type, the system checks to see if the version number passed to the index request is greater than the version of the currently stored document. If true, the document will be indexed and the new version number used. If the value provided is less than or equal to the stored document's version number, a version conflict will occur and the index operation will fail. For example: ``` PUT my-index-000001/_doc/1?version=2&version_type=external { "user": { "id": "elkbee" } } In this example, the operation will succeed since the supplied version of 2 is higher than the current document version of 1. If the document was already updated and its version was set to 2 or higher, the indexing command will fail and result in a conflict (409 HTTP status code). 
A nice side effect is that there is no need to maintain strict ordering of async indexing operations run as a result of changes to a source database, as long as version numbers from the source database are used. Even the simple case of updating the Elasticsearch index using data from a database is simplified if external versioning is used, as only the latest version will be used if the index operations arrive out of order. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-create | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-create | Elasticsearch API documentation} */ export default async function IndexApi (this: That, params: T.IndexRequest, options?: TransportRequestOptionsWithOutMeta): Promise export default async function IndexApi (this: That, params: T.IndexRequest, options?: TransportRequestOptionsWithMeta): Promise> export default async function IndexApi (this: That, params: T.IndexRequest, options?: TransportRequestOptions): Promise export default async function IndexApi (this: That, params: T.IndexRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id', 'index'] - const acceptedBody: string[] = ['document'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = acceptedParams.index + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -58,8 +79,14 @@ export default async function IndexApi (this: That, params: } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } diff --git a/src/api/api/indices.ts b/src/api/api/indices.ts index 8af3fb23d..279578c2f 100644 --- a/src/api/api/indices.ts +++ b/src/api/api/indices.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. 
and contributors + * SPDX-License-Identifier: Apache-2.0 */ /* eslint-disable import/export */ @@ -35,23 +21,859 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -interface That { transport: Transport } + +interface That { + transport: Transport + acceptedParams: Record +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] export default class Indices { transport: Transport + acceptedParams: Record constructor (transport: Transport) { this.transport = transport + this.acceptedParams = { + 'indices.add_block': { + path: [ + 'index', + 'block' + ], + body: [], + query: [ + 'allow_no_indices', + 'expand_wildcards', + 'ignore_unavailable', + 'master_timeout', + 'timeout' + ] + }, + 'indices.analyze': { + path: [ + 'index' + ], + body: [ + 'analyzer', + 'attributes', + 'char_filter', + 'explain', + 'field', + 'filter', + 'normalizer', + 'text', + 'tokenizer' + ], + query: [ + 'index' + ] + }, + 'indices.cancel_migrate_reindex': { + path: [ + 'index' + ], + body: [], + query: [] + }, + 'indices.clear_cache': { + path: [ + 'index' + ], + body: [], + query: [ + 'index', + 'allow_no_indices', + 'expand_wildcards', + 'fielddata', + 'fields', + 'ignore_unavailable', + 'query', + 'request' + ] + }, + 'indices.clone': { + path: [ + 'index', + 'target' + ], + body: [ + 'aliases', + 'settings' + ], + query: [ + 'master_timeout', + 'timeout', + 'wait_for_active_shards' + ] + }, + 'indices.close': { + path: [ + 'index' + ], + body: [], + query: [ + 'allow_no_indices', + 'expand_wildcards', + 'ignore_unavailable', + 'master_timeout', + 'timeout', + 'wait_for_active_shards' + ] + }, + 'indices.create': { + path: [ + 'index' + ], + body: [ + 'aliases', + 'mappings', + 'settings' + ], + query: [ + 'master_timeout', + 'timeout', + 'wait_for_active_shards' + ] + }, + 'indices.create_data_stream': { + path: [ + 'name' + ], + body: [], + query: [ + 'master_timeout', + 'timeout' + ] + }, + 'indices.create_from': { + path: [ + 'source', + 'dest' + ], + body: [ + 'create_from' + ], + query: [] + }, + 'indices.data_streams_stats': { + path: [ + 'name' + ], + body: [], + query: [ + 'expand_wildcards' + ] + }, + 'indices.delete': { + path: [ + 'index' + ], + body: [], + query: [ + 'allow_no_indices', + 'expand_wildcards', + 'ignore_unavailable', + 'master_timeout', + 'timeout' + ] + }, + 'indices.delete_alias': { + path: [ + 'index', + 'name' + ], + body: [], + query: [ + 'master_timeout', + 'timeout' + ] + }, + 'indices.delete_data_lifecycle': { + path: [ + 'name' + ], + body: [], + query: [ + 'expand_wildcards', + 'master_timeout', + 'timeout' + ] + }, + 'indices.delete_data_stream': { + path: [ + 'name' + ], + body: [], + query: [ + 'master_timeout', + 'expand_wildcards' + ] + }, + 'indices.delete_index_template': { + path: [ + 'name' + ], + body: [], + query: [ + 'master_timeout', + 'timeout' + ] + }, + 'indices.delete_template': { + path: [ + 'name' + ], + body: [], + query: [ + 'master_timeout', + 'timeout' + ] + }, + 'indices.disk_usage': { + path: [ + 'index' + ], + body: [], + query: [ + 'allow_no_indices', + 'expand_wildcards', + 'flush', + 'ignore_unavailable', + 'run_expensive_tasks' + ] + }, + 'indices.downsample': { + path: [ + 'index', + 'target_index' + ], + body: [ + 'config' + ], + query: [] + }, + 'indices.exists': { + path: [ + 'index' + ], + body: [], + query: [ + 'allow_no_indices', + 'expand_wildcards', + 'flat_settings', + 'ignore_unavailable', + 'include_defaults', + 'local' + ] + }, + 'indices.exists_alias': { + path: [ + 'name', 
+ 'index' + ], + body: [], + query: [ + 'allow_no_indices', + 'expand_wildcards', + 'ignore_unavailable', + 'master_timeout' + ] + }, + 'indices.exists_index_template': { + path: [ + 'name' + ], + body: [], + query: [ + 'local', + 'flat_settings', + 'master_timeout' + ] + }, + 'indices.exists_template': { + path: [ + 'name' + ], + body: [], + query: [ + 'flat_settings', + 'local', + 'master_timeout' + ] + }, + 'indices.explain_data_lifecycle': { + path: [ + 'index' + ], + body: [], + query: [ + 'include_defaults', + 'master_timeout' + ] + }, + 'indices.field_usage_stats': { + path: [ + 'index' + ], + body: [], + query: [ + 'allow_no_indices', + 'expand_wildcards', + 'ignore_unavailable', + 'fields' + ] + }, + 'indices.flush': { + path: [ + 'index' + ], + body: [], + query: [ + 'allow_no_indices', + 'expand_wildcards', + 'force', + 'ignore_unavailable', + 'wait_if_ongoing' + ] + }, + 'indices.forcemerge': { + path: [ + 'index' + ], + body: [], + query: [ + 'allow_no_indices', + 'expand_wildcards', + 'flush', + 'ignore_unavailable', + 'max_num_segments', + 'only_expunge_deletes', + 'wait_for_completion' + ] + }, + 'indices.get': { + path: [ + 'index' + ], + body: [], + query: [ + 'allow_no_indices', + 'expand_wildcards', + 'flat_settings', + 'ignore_unavailable', + 'include_defaults', + 'local', + 'master_timeout', + 'features' + ] + }, + 'indices.get_alias': { + path: [ + 'name', + 'index' + ], + body: [], + query: [ + 'allow_no_indices', + 'expand_wildcards', + 'ignore_unavailable', + 'master_timeout' + ] + }, + 'indices.get_data_lifecycle': { + path: [ + 'name' + ], + body: [], + query: [ + 'expand_wildcards', + 'include_defaults', + 'master_timeout' + ] + }, + 'indices.get_data_lifecycle_stats': { + path: [], + body: [], + query: [] + }, + 'indices.get_data_stream': { + path: [ + 'name' + ], + body: [], + query: [ + 'expand_wildcards', + 'include_defaults', + 'master_timeout', + 'verbose' + ] + }, + 'indices.get_field_mapping': { + path: [ + 'fields', + 'index' + ], + body: [], + query: [ + 'allow_no_indices', + 'expand_wildcards', + 'ignore_unavailable', + 'include_defaults', + 'local' + ] + }, + 'indices.get_index_template': { + path: [ + 'name' + ], + body: [], + query: [ + 'local', + 'flat_settings', + 'master_timeout', + 'include_defaults' + ] + }, + 'indices.get_mapping': { + path: [ + 'index' + ], + body: [], + query: [ + 'allow_no_indices', + 'expand_wildcards', + 'ignore_unavailable', + 'local', + 'master_timeout' + ] + }, + 'indices.get_migrate_reindex_status': { + path: [ + 'index' + ], + body: [], + query: [] + }, + 'indices.get_settings': { + path: [ + 'index', + 'name' + ], + body: [], + query: [ + 'allow_no_indices', + 'expand_wildcards', + 'flat_settings', + 'ignore_unavailable', + 'include_defaults', + 'local', + 'master_timeout' + ] + }, + 'indices.get_template': { + path: [ + 'name' + ], + body: [], + query: [ + 'flat_settings', + 'local', + 'master_timeout' + ] + }, + 'indices.migrate_reindex': { + path: [], + body: [ + 'reindex' + ], + query: [] + }, + 'indices.migrate_to_data_stream': { + path: [ + 'name' + ], + body: [], + query: [ + 'master_timeout', + 'timeout' + ] + }, + 'indices.modify_data_stream': { + path: [], + body: [ + 'actions' + ], + query: [] + }, + 'indices.open': { + path: [ + 'index' + ], + body: [], + query: [ + 'allow_no_indices', + 'expand_wildcards', + 'ignore_unavailable', + 'master_timeout', + 'timeout', + 'wait_for_active_shards' + ] + }, + 'indices.promote_data_stream': { + path: [ + 'name' + ], + body: [], + query: [ + 'master_timeout' + ] 
+ }, + 'indices.put_alias': { + path: [ + 'index', + 'name' + ], + body: [ + 'filter', + 'index_routing', + 'is_write_index', + 'routing', + 'search_routing' + ], + query: [ + 'master_timeout', + 'timeout' + ] + }, + 'indices.put_data_lifecycle': { + path: [ + 'name' + ], + body: [ + 'data_retention', + 'downsampling', + 'enabled' + ], + query: [ + 'expand_wildcards', + 'master_timeout', + 'timeout' + ] + }, + 'indices.put_index_template': { + path: [ + 'name' + ], + body: [ + 'index_patterns', + 'composed_of', + 'template', + 'data_stream', + 'priority', + 'version', + '_meta', + 'allow_auto_create', + 'ignore_missing_component_templates', + 'deprecated' + ], + query: [ + 'create', + 'master_timeout', + 'cause' + ] + }, + 'indices.put_mapping': { + path: [ + 'index' + ], + body: [ + 'date_detection', + 'dynamic', + 'dynamic_date_formats', + 'dynamic_templates', + '_field_names', + '_meta', + 'numeric_detection', + 'properties', + '_routing', + '_source', + 'runtime' + ], + query: [ + 'allow_no_indices', + 'expand_wildcards', + 'ignore_unavailable', + 'master_timeout', + 'timeout', + 'write_index_only' + ] + }, + 'indices.put_settings': { + path: [ + 'index' + ], + body: [ + 'settings' + ], + query: [ + 'allow_no_indices', + 'expand_wildcards', + 'flat_settings', + 'ignore_unavailable', + 'master_timeout', + 'preserve_existing', + 'reopen', + 'timeout' + ] + }, + 'indices.put_template': { + path: [ + 'name' + ], + body: [ + 'aliases', + 'index_patterns', + 'mappings', + 'order', + 'settings', + 'version' + ], + query: [ + 'create', + 'master_timeout', + 'order', + 'cause' + ] + }, + 'indices.recovery': { + path: [ + 'index' + ], + body: [], + query: [ + 'active_only', + 'detailed' + ] + }, + 'indices.refresh': { + path: [ + 'index' + ], + body: [], + query: [ + 'allow_no_indices', + 'expand_wildcards', + 'ignore_unavailable' + ] + }, + 'indices.reload_search_analyzers': { + path: [ + 'index' + ], + body: [], + query: [ + 'allow_no_indices', + 'expand_wildcards', + 'ignore_unavailable', + 'resource' + ] + }, + 'indices.resolve_cluster': { + path: [ + 'name' + ], + body: [], + query: [ + 'allow_no_indices', + 'expand_wildcards', + 'ignore_throttled', + 'ignore_unavailable', + 'timeout' + ] + }, + 'indices.resolve_index': { + path: [ + 'name' + ], + body: [], + query: [ + 'expand_wildcards', + 'ignore_unavailable', + 'allow_no_indices' + ] + }, + 'indices.rollover': { + path: [ + 'alias', + 'new_index' + ], + body: [ + 'aliases', + 'conditions', + 'mappings', + 'settings' + ], + query: [ + 'dry_run', + 'master_timeout', + 'timeout', + 'wait_for_active_shards', + 'lazy' + ] + }, + 'indices.segments': { + path: [ + 'index' + ], + body: [], + query: [ + 'allow_no_indices', + 'expand_wildcards', + 'ignore_unavailable' + ] + }, + 'indices.shard_stores': { + path: [ + 'index' + ], + body: [], + query: [ + 'allow_no_indices', + 'expand_wildcards', + 'ignore_unavailable', + 'status' + ] + }, + 'indices.shrink': { + path: [ + 'index', + 'target' + ], + body: [ + 'aliases', + 'settings' + ], + query: [ + 'master_timeout', + 'timeout', + 'wait_for_active_shards' + ] + }, + 'indices.simulate_index_template': { + path: [ + 'name' + ], + body: [], + query: [ + 'create', + 'cause', + 'master_timeout', + 'include_defaults' + ] + }, + 'indices.simulate_template': { + path: [ + 'name' + ], + body: [ + 'allow_auto_create', + 'index_patterns', + 'composed_of', + 'template', + 'data_stream', + 'priority', + 'version', + '_meta', + 'ignore_missing_component_templates', + 'deprecated' + ], + query: [ + 'create', + 
'cause', + 'master_timeout', + 'include_defaults' + ] + }, + 'indices.split': { + path: [ + 'index', + 'target' + ], + body: [ + 'aliases', + 'settings' + ], + query: [ + 'master_timeout', + 'timeout', + 'wait_for_active_shards' + ] + }, + 'indices.stats': { + path: [ + 'metric', + 'index' + ], + body: [], + query: [ + 'completion_fields', + 'expand_wildcards', + 'fielddata_fields', + 'fields', + 'forbid_closed_indices', + 'groups', + 'include_segment_file_sizes', + 'include_unloaded_segments', + 'level' + ] + }, + 'indices.update_aliases': { + path: [], + body: [ + 'actions' + ], + query: [ + 'master_timeout', + 'timeout' + ] + }, + 'indices.validate_query': { + path: [ + 'index' + ], + body: [ + 'query' + ], + query: [ + 'allow_no_indices', + 'all_shards', + 'analyzer', + 'analyze_wildcard', + 'default_operator', + 'df', + 'expand_wildcards', + 'explain', + 'ignore_unavailable', + 'lenient', + 'rewrite', + 'q' + ] + } + } } /** * Add an index block. Add an index block to an index. Index blocks limit the operations allowed on an index by blocking specific operation types. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-add-block | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-indices-add-block | Elasticsearch API documentation} */ async addBlock (this: That, params: T.IndicesAddBlockRequest, options?: TransportRequestOptionsWithOutMeta): Promise async addBlock (this: That, params: T.IndicesAddBlockRequest, options?: TransportRequestOptionsWithMeta): Promise> async addBlock (this: That, params: T.IndicesAddBlockRequest, options?: TransportRequestOptions): Promise async addBlock (this: That, params: T.IndicesAddBlockRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index', 'block'] + const { + path: acceptedPath + } = this.acceptedParams['indices.add_block'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -88,14 +910,18 @@ export default class Indices { /** * Get tokens from text analysis. The analyze API performs analysis on a text string and returns the resulting tokens. Generating excessive amount of tokens may cause a node to run out of memory. The `index.analyze.max_token_count` setting enables you to limit the number of tokens that can be produced. If more than this limit of tokens gets generated, an error occurs. The `_analyze` endpoint without a specified index will always use `10000` as its limit. 
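A minimal usage sketch of the analyze endpoint through this client, assuming a reachable cluster at a placeholder URL and a hypothetical index named `my-index` (authentication omitted); it shows body fields such as `analyzer` and `text` flowing through the shared accepted-params lookup rather than a per-method `acceptedBody` array:

```ts
import { Client } from '@elastic/elasticsearch'

// Placeholder node URL; authentication is omitted for brevity.
const client = new Client({ node: 'http://localhost:9200/' })

// `analyzer` and `text` are body fields; the client routes them into the
// request body via the accepted-params lookup for 'indices.analyze'.
const result = await client.indices.analyze({
  index: 'my-index',
  analyzer: 'standard',
  text: 'The quick brown fox'
})
console.log(result.tokens?.map(t => t.token))
```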
- * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-analyze | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-indices-analyze | Elasticsearch API documentation} */ async analyze (this: That, params?: T.IndicesAnalyzeRequest, options?: TransportRequestOptionsWithOutMeta): Promise async analyze (this: That, params?: T.IndicesAnalyzeRequest, options?: TransportRequestOptionsWithMeta): Promise> async analyze (this: That, params?: T.IndicesAnalyzeRequest, options?: TransportRequestOptions): Promise async analyze (this: That, params?: T.IndicesAnalyzeRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] - const acceptedBody: string[] = ['analyzer', 'attributes', 'char_filter', 'explain', 'field', 'filter', 'normalizer', 'text', 'tokenizer'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['indices.analyze'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -118,8 +944,14 @@ export default class Indices { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -143,13 +975,16 @@ export default class Indices { /** * Cancel a migration reindex operation. Cancel a migration reindex attempt for a data stream or index. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/migrate-data-stream.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-indices-cancel-migrate-reindex | Elasticsearch API documentation} */ async cancelMigrateReindex (this: That, params: T.IndicesCancelMigrateReindexRequest, options?: TransportRequestOptionsWithOutMeta): Promise async cancelMigrateReindex (this: That, params: T.IndicesCancelMigrateReindexRequest, options?: TransportRequestOptionsWithMeta): Promise> async cancelMigrateReindex (this: That, params: T.IndicesCancelMigrateReindexRequest, options?: TransportRequestOptions): Promise async cancelMigrateReindex (this: That, params: T.IndicesCancelMigrateReindexRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] + const { + path: acceptedPath + } = this.acceptedParams['indices.cancel_migrate_reindex'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -185,13 +1020,16 @@ export default class Indices { /** * Clear the cache. Clear the cache of one or more indices. For data streams, the API clears the caches of the stream's backing indices. By default, the clear cache API clears all caches. To clear only specific caches, use the `fielddata`, `query`, or `request` parameters. To clear the cache only of specific fields, use the `fields` parameter. 
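As a hedged illustration of the parameters mentioned above, this sketch clears only the fielddata and query caches of a hypothetical index; the node URL and index name are assumptions:

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' }) // placeholder node, auth omitted

// Clear only the fielddata and query caches of a hypothetical index.
await client.indices.clearCache({
  index: 'my-index',
  fielddata: true,
  query: true
})
```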
- * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-clear-cache | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-indices-clear-cache | Elasticsearch API documentation} */ async clearCache (this: That, params?: T.IndicesClearCacheRequest, options?: TransportRequestOptionsWithOutMeta): Promise async clearCache (this: That, params?: T.IndicesClearCacheRequest, options?: TransportRequestOptionsWithMeta): Promise> async clearCache (this: That, params?: T.IndicesClearCacheRequest, options?: TransportRequestOptions): Promise async clearCache (this: That, params?: T.IndicesClearCacheRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] + const { + path: acceptedPath + } = this.acceptedParams['indices.clear_cache'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -235,14 +1073,18 @@ export default class Indices { /** * Clone an index. Clone an existing index into a new index. Each original primary shard is cloned into a new primary shard in the new index. IMPORTANT: Elasticsearch does not apply index templates to the resulting index. The API also does not copy index metadata from the original index. Index metadata includes aliases, index lifecycle management phase definitions, and cross-cluster replication (CCR) follower information. For example, if you clone a CCR follower index, the resulting clone will not be a follower index. The clone API copies most index settings from the source index to the resulting index, with the exception of `index.number_of_replicas` and `index.auto_expand_replicas`. To set the number of replicas in the resulting index, configure these settings in the clone request. Cloning works as follows: * First, it creates a new target index with the same definition as the source index. * Then it hard-links segments from the source index into the target index. If the file system does not support hard-linking, all segments are copied into the new index, which is a much more time consuming process. * Finally, it recovers the target index as though it were a closed index which had just been re-opened. IMPORTANT: Indices can only be cloned if they meet the following requirements: * The index must be marked as read-only and have a cluster health status of green. * The target index must not exist. * The source index must have the same number of primary shards as the target index. * The node handling the clone process must have sufficient free disk space to accommodate a second copy of the existing index. The current write index on a data stream cannot be cloned. In order to clone the current write index, the data stream must first be rolled over so that a new write index is created and then the previous write index can be cloned. NOTE: Mappings cannot be specified in the `_clone` request. The mappings of the source index will be used for the target index. **Monitor the cloning process** The cloning process can be monitored with the cat recovery API or the cluster health API can be used to wait until all primary shards have been allocated by setting the `wait_for_status` parameter to `yellow`. The `_clone` API returns as soon as the target index has been added to the cluster state, before any shards have been allocated. At this point, all shards are in the state unassigned. 
If, for any reason, the target index can't be allocated, its primary shard will remain unassigned until it can be allocated on that node. Once the primary shard is allocated, it moves to state initializing, and the clone process begins. When the clone operation completes, the shard will become active. At that point, Elasticsearch will try to allocate any replicas and may decide to relocate the primary shard to another node. **Wait for active shards** Because the clone operation creates a new index to clone the shards to, the wait for active shards setting on index creation applies to the clone index action as well. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-clone | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-indices-clone | Elasticsearch API documentation} */ async clone (this: That, params: T.IndicesCloneRequest, options?: TransportRequestOptionsWithOutMeta): Promise async clone (this: That, params: T.IndicesCloneRequest, options?: TransportRequestOptionsWithMeta): Promise> async clone (this: That, params: T.IndicesCloneRequest, options?: TransportRequestOptions): Promise async clone (this: That, params: T.IndicesCloneRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index', 'target'] - const acceptedBody: string[] = ['aliases', 'settings'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['indices.clone'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -264,8 +1106,14 @@ export default class Indices { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -283,13 +1131,16 @@ export default class Indices { /** * Close an index. A closed index is blocked for read or write operations and does not allow all operations that opened indices allow. It is not possible to index documents or to search for documents in a closed index. Closed indices do not have to maintain internal data structures for indexing or searching documents, which results in a smaller overhead on the cluster. When opening or closing an index, the master node is responsible for restarting the index shards to reflect the new state of the index. The shards will then go through the normal recovery process. The data of opened and closed indices is automatically replicated by the cluster to ensure that enough shard copies are safely kept around at all times. You can open and close multiple indices. An error is thrown if the request explicitly refers to a missing index. This behaviour can be turned off using the `ignore_unavailable=true` parameter. By default, you must explicitly name the indices you are opening or closing. To open or close indices with `_all`, `*`, or other wildcard expressions, change the` action.destructive_requires_name` setting to `false`. This setting can also be changed with the cluster update settings API. Closed indices consume a significant amount of disk-space which can cause problems in managed environments. 
Closing indices can be turned off with the cluster settings API by setting `cluster.indices.close.enable` to `false`. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-close | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-indices-close | Elasticsearch API documentation} */ async close (this: That, params: T.IndicesCloseRequest, options?: TransportRequestOptionsWithOutMeta): Promise async close (this: That, params: T.IndicesCloseRequest, options?: TransportRequestOptionsWithMeta): Promise> async close (this: That, params: T.IndicesCloseRequest, options?: TransportRequestOptions): Promise async close (this: That, params: T.IndicesCloseRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] + const { + path: acceptedPath + } = this.acceptedParams['indices.close'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -325,14 +1176,18 @@ export default class Indices { /** * Create an index. You can use the create index API to add a new index to an Elasticsearch cluster. When creating an index, you can specify the following: * Settings for the index. * Mappings for fields in the index. * Index aliases **Wait for active shards** By default, index creation will only return a response to the client when the primary copies of each shard have been started, or the request times out. The index creation response will indicate what happened. For example, `acknowledged` indicates whether the index was successfully created in the cluster, `while shards_acknowledged` indicates whether the requisite number of shard copies were started for each shard in the index before timing out. Note that it is still possible for either `acknowledged` or `shards_acknowledged` to be `false`, but for the index creation to be successful. These values simply indicate whether the operation completed before the timeout. If `acknowledged` is false, the request timed out before the cluster state was updated with the newly created index, but it probably will be created sometime soon. If `shards_acknowledged` is false, then the request timed out before the requisite number of shards were started (by default just the primaries), even if the cluster state was successfully updated to reflect the newly created index (that is to say, `acknowledged` is `true`). You can change the default of only waiting for the primary shards to start through the index setting `index.write.wait_for_active_shards`. Note that changing this setting will also affect the `wait_for_active_shards` value on all subsequent write operations. 
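A sketch of a create request under the assumptions above (placeholder node URL, hypothetical index and fields): `settings` and `mappings` travel in the body, while `wait_for_active_shards` is routed to the query string:

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' }) // placeholder node, auth omitted

// `settings` and `mappings` are body fields; `wait_for_active_shards` goes to the query string.
await client.indices.create({
  index: 'my-index',
  settings: { number_of_shards: 1, number_of_replicas: 1 },
  mappings: {
    properties: {
      title: { type: 'text' },
      created_at: { type: 'date' }
    }
  },
  wait_for_active_shards: 'all'
})
```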
- * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-create | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-indices-create | Elasticsearch API documentation} */ async create (this: That, params: T.IndicesCreateRequest, options?: TransportRequestOptionsWithOutMeta): Promise async create (this: That, params: T.IndicesCreateRequest, options?: TransportRequestOptionsWithMeta): Promise> async create (this: That, params: T.IndicesCreateRequest, options?: TransportRequestOptions): Promise async create (this: That, params: T.IndicesCreateRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] - const acceptedBody: string[] = ['aliases', 'mappings', 'settings'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['indices.create'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -354,8 +1209,14 @@ export default class Indices { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -372,13 +1233,16 @@ export default class Indices { /** * Create a data stream. You must have a matching index template with data stream enabled. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-create-data-stream | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-indices-create-data-stream | Elasticsearch API documentation} */ async createDataStream (this: That, params: T.IndicesCreateDataStreamRequest, options?: TransportRequestOptionsWithOutMeta): Promise async createDataStream (this: That, params: T.IndicesCreateDataStreamRequest, options?: TransportRequestOptionsWithMeta): Promise> async createDataStream (this: That, params: T.IndicesCreateDataStreamRequest, options?: TransportRequestOptions): Promise async createDataStream (this: That, params: T.IndicesCreateDataStreamRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] + const { + path: acceptedPath + } = this.acceptedParams['indices.create_data_stream'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -414,14 +1278,18 @@ export default class Indices { /** * Create an index from a source index. Copy the mappings and settings from the source index to a destination index while allowing request settings and mappings to override the source values. 
- * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/migrate-data-stream.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-indices-create-from | Elasticsearch API documentation} */ async createFrom (this: That, params: T.IndicesCreateFromRequest, options?: TransportRequestOptionsWithOutMeta): Promise async createFrom (this: That, params: T.IndicesCreateFromRequest, options?: TransportRequestOptionsWithMeta): Promise> async createFrom (this: That, params: T.IndicesCreateFromRequest, options?: TransportRequestOptions): Promise async createFrom (this: That, params: T.IndicesCreateFromRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['source', 'dest'] - const acceptedBody: string[] = ['create_from'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['indices.create_from'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -433,8 +1301,14 @@ export default class Indices { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -452,13 +1326,16 @@ export default class Indices { /** * Get data stream stats. Get statistics for one or more data streams. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-data-streams-stats-1 | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-indices-data-streams-stats-1 | Elasticsearch API documentation} */ async dataStreamsStats (this: That, params?: T.IndicesDataStreamsStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async dataStreamsStats (this: That, params?: T.IndicesDataStreamsStatsRequest, options?: TransportRequestOptionsWithMeta): Promise> async dataStreamsStats (this: That, params?: T.IndicesDataStreamsStatsRequest, options?: TransportRequestOptions): Promise async dataStreamsStats (this: That, params?: T.IndicesDataStreamsStatsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] + const { + path: acceptedPath + } = this.acceptedParams['indices.data_streams_stats'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -502,13 +1379,16 @@ export default class Indices { /** * Delete indices. Deleting an index deletes its documents, shards, and metadata. It does not delete related Kibana components, such as data views, visualizations, or dashboards. You cannot delete the current write index of a data stream. To delete the index, you must roll over the data stream so a new write index is created. You can then use the delete index API to delete the previous write index. 
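A minimal sketch, assuming a hypothetical index name, of deleting an index while tolerating the case where it is already gone:

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' }) // placeholder node, auth omitted

// Delete an index; ignore_unavailable avoids a 404 if it has already been removed.
await client.indices.delete({
  index: 'my-old-index',
  ignore_unavailable: true
})
```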
- * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-delete | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-indices-delete | Elasticsearch API documentation} */ async delete (this: That, params: T.IndicesDeleteRequest, options?: TransportRequestOptionsWithOutMeta): Promise async delete (this: That, params: T.IndicesDeleteRequest, options?: TransportRequestOptionsWithMeta): Promise> async delete (this: That, params: T.IndicesDeleteRequest, options?: TransportRequestOptions): Promise async delete (this: That, params: T.IndicesDeleteRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] + const { + path: acceptedPath + } = this.acceptedParams['indices.delete'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -544,13 +1424,16 @@ export default class Indices { /** * Delete an alias. Removes a data stream or index from an alias. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-delete-alias | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-indices-delete-alias | Elasticsearch API documentation} */ async deleteAlias (this: That, params: T.IndicesDeleteAliasRequest, options?: TransportRequestOptionsWithOutMeta): Promise async deleteAlias (this: That, params: T.IndicesDeleteAliasRequest, options?: TransportRequestOptionsWithMeta): Promise> async deleteAlias (this: That, params: T.IndicesDeleteAliasRequest, options?: TransportRequestOptions): Promise async deleteAlias (this: That, params: T.IndicesDeleteAliasRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index', 'name'] + const { + path: acceptedPath + } = this.acceptedParams['indices.delete_alias'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -594,13 +1477,16 @@ export default class Indices { /** * Delete data stream lifecycles. Removes the data stream lifecycle from a data stream, rendering it not managed by the data stream lifecycle. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-delete-data-lifecycle | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-indices-delete-data-lifecycle | Elasticsearch API documentation} */ async deleteDataLifecycle (this: That, params: T.IndicesDeleteDataLifecycleRequest, options?: TransportRequestOptionsWithOutMeta): Promise async deleteDataLifecycle (this: That, params: T.IndicesDeleteDataLifecycleRequest, options?: TransportRequestOptionsWithMeta): Promise> async deleteDataLifecycle (this: That, params: T.IndicesDeleteDataLifecycleRequest, options?: TransportRequestOptions): Promise async deleteDataLifecycle (this: That, params: T.IndicesDeleteDataLifecycleRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] + const { + path: acceptedPath + } = this.acceptedParams['indices.delete_data_lifecycle'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -636,13 +1522,16 @@ export default class Indices { /** * Delete data streams. Deletes one or more data streams and their backing indices. 
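A short sketch, with an assumed data stream name, of removing a data stream and its backing indices:

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' }) // placeholder node, auth omitted

// Remove a data stream together with all of its backing indices.
await client.indices.deleteDataStream({ name: 'my-old-data-stream' })
```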
- * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-delete-data-stream | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-indices-delete-data-stream | Elasticsearch API documentation} */ async deleteDataStream (this: That, params: T.IndicesDeleteDataStreamRequest, options?: TransportRequestOptionsWithOutMeta): Promise async deleteDataStream (this: That, params: T.IndicesDeleteDataStreamRequest, options?: TransportRequestOptionsWithMeta): Promise> async deleteDataStream (this: That, params: T.IndicesDeleteDataStreamRequest, options?: TransportRequestOptions): Promise async deleteDataStream (this: That, params: T.IndicesDeleteDataStreamRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] + const { + path: acceptedPath + } = this.acceptedParams['indices.delete_data_stream'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -678,13 +1567,16 @@ export default class Indices { /** * Delete an index template. The provided may contain multiple template names separated by a comma. If multiple template names are specified then there is no wildcard support and the provided names should match completely with existing templates. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-delete-index-template | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-indices-delete-index-template | Elasticsearch API documentation} */ async deleteIndexTemplate (this: That, params: T.IndicesDeleteIndexTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise async deleteIndexTemplate (this: That, params: T.IndicesDeleteIndexTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise> async deleteIndexTemplate (this: That, params: T.IndicesDeleteIndexTemplateRequest, options?: TransportRequestOptions): Promise async deleteIndexTemplate (this: That, params: T.IndicesDeleteIndexTemplateRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] + const { + path: acceptedPath + } = this.acceptedParams['indices.delete_index_template'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -720,13 +1612,16 @@ export default class Indices { /** * Delete a legacy index template. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-delete-template | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-indices-delete-template | Elasticsearch API documentation} */ async deleteTemplate (this: That, params: T.IndicesDeleteTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise async deleteTemplate (this: That, params: T.IndicesDeleteTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise> async deleteTemplate (this: That, params: T.IndicesDeleteTemplateRequest, options?: TransportRequestOptions): Promise async deleteTemplate (this: That, params: T.IndicesDeleteTemplateRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] + const { + path: acceptedPath + } = this.acceptedParams['indices.delete_template'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -762,13 +1657,16 @@ export default class Indices { /** * Analyze the index disk usage. Analyze the disk usage of each field of an index or data stream. This API might not support indices created in previous Elasticsearch versions. The result of a small index can be inaccurate as some parts of an index might not be analyzed by the API. NOTE: The total size of fields of the analyzed shards of the index in the response is usually smaller than the index `store_size` value because some small metadata files are ignored and some parts of data files might not be scanned by the API. Since stored fields are stored together in a compressed format, the sizes of stored fields are also estimates and can be inaccurate. The stored size of the `_id` field is likely underestimated while the `_source` field is overestimated. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-disk-usage | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-indices-disk-usage | Elasticsearch API documentation} */ async diskUsage (this: That, params: T.IndicesDiskUsageRequest, options?: TransportRequestOptionsWithOutMeta): Promise async diskUsage (this: That, params: T.IndicesDiskUsageRequest, options?: TransportRequestOptionsWithMeta): Promise> async diskUsage (this: That, params: T.IndicesDiskUsageRequest, options?: TransportRequestOptions): Promise async diskUsage (this: That, params: T.IndicesDiskUsageRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] + const { + path: acceptedPath + } = this.acceptedParams['indices.disk_usage'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -804,14 +1702,18 @@ export default class Indices { /** * Downsample an index. Aggregate a time series (TSDS) index and store pre-computed statistical summaries (`min`, `max`, `sum`, `value_count` and `avg`) for each metric field grouped by a configured time interval. For example, a TSDS index that contains metrics sampled every 10 seconds can be downsampled to an hourly index. All documents within an hour interval are summarized and stored as a single document in the downsample index. NOTE: Only indices in a time series data stream are supported. Neither field nor document level security can be defined on the source index. The source index must be read only (`index.blocks.write: true`). 
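A hedged sketch of a downsample call; the backing-index and target names are invented, and the `config.fixed_interval` body shape is an assumption based on the `config` body field listed above rather than something this diff spells out:

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' }) // placeholder node, auth omitted

// Summarize 10-second samples from a read-only TSDS backing index into hourly buckets.
// The `config` object is the request body; `fixed_interval` is an assumed field name.
await client.indices.downsample({
  index: '.ds-my-metrics-2099.03.07-000001',
  target_index: 'my-metrics-downsampled-1h',
  config: { fixed_interval: '1h' }
})
```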
- * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-downsample | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-indices-downsample | Elasticsearch API documentation} */ async downsample (this: That, params: T.IndicesDownsampleRequest, options?: TransportRequestOptionsWithOutMeta): Promise async downsample (this: That, params: T.IndicesDownsampleRequest, options?: TransportRequestOptionsWithMeta): Promise> async downsample (this: That, params: T.IndicesDownsampleRequest, options?: TransportRequestOptions): Promise async downsample (this: That, params: T.IndicesDownsampleRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index', 'target_index'] - const acceptedBody: string[] = ['config'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['indices.downsample'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -823,8 +1725,14 @@ export default class Indices { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -842,13 +1750,16 @@ export default class Indices { /** * Check indices. Check if one or more indices, index aliases, or data streams exist. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-exists | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-indices-exists | Elasticsearch API documentation} */ async exists (this: That, params: T.IndicesExistsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async exists (this: That, params: T.IndicesExistsRequest, options?: TransportRequestOptionsWithMeta): Promise> async exists (this: That, params: T.IndicesExistsRequest, options?: TransportRequestOptions): Promise async exists (this: That, params: T.IndicesExistsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] + const { + path: acceptedPath + } = this.acceptedParams['indices.exists'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -884,13 +1795,16 @@ export default class Indices { /** * Check aliases. Check if one or more data stream or index aliases exist. 
- * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-exists-alias | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-indices-exists-alias | Elasticsearch API documentation} */ async existsAlias (this: That, params: T.IndicesExistsAliasRequest, options?: TransportRequestOptionsWithOutMeta): Promise async existsAlias (this: That, params: T.IndicesExistsAliasRequest, options?: TransportRequestOptionsWithMeta): Promise> async existsAlias (this: That, params: T.IndicesExistsAliasRequest, options?: TransportRequestOptions): Promise async existsAlias (this: That, params: T.IndicesExistsAliasRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name', 'index'] + const { + path: acceptedPath + } = this.acceptedParams['indices.exists_alias'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -934,13 +1848,16 @@ export default class Indices { /** * Check index templates. Check whether index templates exist. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-exists-index-template | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-indices-exists-index-template | Elasticsearch API documentation} */ async existsIndexTemplate (this: That, params: T.IndicesExistsIndexTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise async existsIndexTemplate (this: That, params: T.IndicesExistsIndexTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise> async existsIndexTemplate (this: That, params: T.IndicesExistsIndexTemplateRequest, options?: TransportRequestOptions): Promise async existsIndexTemplate (this: That, params: T.IndicesExistsIndexTemplateRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] + const { + path: acceptedPath + } = this.acceptedParams['indices.exists_index_template'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -976,13 +1893,16 @@ export default class Indices { /** * Check existence of index templates. Get information about whether index templates exist. Index templates define settings, mappings, and aliases that can be applied automatically to new indices. IMPORTANT: This documentation is about legacy index templates, which are deprecated and will be replaced by the composable templates introduced in Elasticsearch 7.8. 
- * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-exists-template | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-indices-exists-template | Elasticsearch API documentation} */ async existsTemplate (this: That, params: T.IndicesExistsTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise async existsTemplate (this: That, params: T.IndicesExistsTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise> async existsTemplate (this: That, params: T.IndicesExistsTemplateRequest, options?: TransportRequestOptions): Promise async existsTemplate (this: That, params: T.IndicesExistsTemplateRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] + const { + path: acceptedPath + } = this.acceptedParams['indices.exists_template'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1018,13 +1938,16 @@ export default class Indices { /** * Get the status for a data stream lifecycle. Get information about an index or data stream's current data stream lifecycle status, such as time since index creation, time since rollover, the lifecycle configuration managing the index, or any errors encountered during lifecycle execution. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-explain-data-lifecycle | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-indices-explain-data-lifecycle | Elasticsearch API documentation} */ async explainDataLifecycle (this: That, params: T.IndicesExplainDataLifecycleRequest, options?: TransportRequestOptionsWithOutMeta): Promise async explainDataLifecycle (this: That, params: T.IndicesExplainDataLifecycleRequest, options?: TransportRequestOptionsWithMeta): Promise> async explainDataLifecycle (this: That, params: T.IndicesExplainDataLifecycleRequest, options?: TransportRequestOptions): Promise async explainDataLifecycle (this: That, params: T.IndicesExplainDataLifecycleRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] + const { + path: acceptedPath + } = this.acceptedParams['indices.explain_data_lifecycle'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1060,13 +1983,16 @@ export default class Indices { /** * Get field usage stats. Get field usage information for each shard and field of an index. Field usage statistics are automatically captured when queries are running on a cluster. A shard-level search request that accesses a given field, even if multiple times during that request, is counted as a single use. The response body reports the per-shard usage count of the data structures that back the fields in the index. A given request will increment each count by a maximum value of 1, even if the request accesses the same field multiple times. 
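A small sketch, with hypothetical index and field names, of reading the per-shard usage counts described above:

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' }) // placeholder node, auth omitted

// Report per-shard field usage, narrowed to two hypothetical fields.
const usage = await client.indices.fieldUsageStats({
  index: 'my-index',
  fields: ['title', 'created_at']
})
console.log(JSON.stringify(usage, null, 2))
```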
- * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-field-usage-stats | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-indices-field-usage-stats | Elasticsearch API documentation} */ async fieldUsageStats (this: That, params: T.IndicesFieldUsageStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async fieldUsageStats (this: That, params: T.IndicesFieldUsageStatsRequest, options?: TransportRequestOptionsWithMeta): Promise> async fieldUsageStats (this: That, params: T.IndicesFieldUsageStatsRequest, options?: TransportRequestOptions): Promise async fieldUsageStats (this: That, params: T.IndicesFieldUsageStatsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] + const { + path: acceptedPath + } = this.acceptedParams['indices.field_usage_stats'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1102,13 +2028,16 @@ export default class Indices { /** * Flush data streams or indices. Flushing a data stream or index is the process of making sure that any data that is currently only stored in the transaction log is also permanently stored in the Lucene index. When restarting, Elasticsearch replays any unflushed operations from the transaction log into the Lucene index to bring it back into the state that it was in before the restart. Elasticsearch automatically triggers flushes as needed, using heuristics that trade off the size of the unflushed transaction log against the cost of performing each flush. After each operation has been flushed it is permanently stored in the Lucene index. This may mean that there is no need to maintain an additional copy of it in the transaction log. The transaction log is made up of multiple files, called generations, and Elasticsearch will delete any generation files when they are no longer needed, freeing up disk space. It is also possible to trigger a flush on one or more indices using the flush API, although it is rare for users to need to call this API directly. If you call the flush API after indexing some documents then a successful response indicates that Elasticsearch has flushed all the documents that were indexed before the flush API was called. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-flush | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-indices-flush | Elasticsearch API documentation} */ async flush (this: That, params?: T.IndicesFlushRequest, options?: TransportRequestOptionsWithOutMeta): Promise async flush (this: That, params?: T.IndicesFlushRequest, options?: TransportRequestOptionsWithMeta): Promise> async flush (this: That, params?: T.IndicesFlushRequest, options?: TransportRequestOptions): Promise async flush (this: That, params?: T.IndicesFlushRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] + const { + path: acceptedPath + } = this.acceptedParams['indices.flush'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1152,13 +2081,16 @@ export default class Indices { /** * Force a merge. Perform the force merge operation on the shards of one or more indices. For data streams, the API forces a merge on the shards of the stream's backing indices. 
Merging reduces the number of segments in each shard by merging some of them together and also frees up the space used by deleted documents. Merging normally happens automatically, but sometimes it is useful to trigger a merge manually. WARNING: We recommend force merging only a read-only index (meaning the index is no longer receiving writes). When documents are updated or deleted, the old version is not immediately removed but instead soft-deleted and marked with a "tombstone". These soft-deleted documents are automatically cleaned up during regular segment merges. But force merge can cause very large (greater than 5 GB) segments to be produced, which are not eligible for regular merges. So the number of soft-deleted documents can then grow rapidly, resulting in higher disk usage and worse search performance. If you regularly force merge an index receiving writes, this can also make snapshots more expensive, since the new documents can't be backed up incrementally. **Blocks during a force merge** Calls to this API block until the merge is complete (unless the request contains `wait_for_completion=false`). If the client connection is lost before completion then the force merge process will continue in the background. Any new requests to force merge the same indices will also block until the ongoing force merge is complete. **Running force merge asynchronously** If the request contains `wait_for_completion=false`, Elasticsearch performs some preflight checks, launches the request, and returns a task you can use to get the status of the task. However, you cannot cancel this task as the force merge task is not cancelable. Elasticsearch creates a record of this task as a document at `_tasks/<task_id>`. When you are done with a task, you should delete the task document so Elasticsearch can reclaim the space. **Force merging multiple indices** You can force merge multiple indices with a single request by targeting: * One or more data streams that contain multiple backing indices * Multiple indices * One or more aliases * All data streams and indices in a cluster Each targeted shard is force-merged separately using the `force_merge` threadpool. By default each node only has a single `force_merge` thread which means that the shards on that node are force-merged one at a time. If you expand the `force_merge` threadpool on a node then it will force merge its shards in parallel. Force merge makes the storage for the shard being merged temporarily increase, as it may require free space up to triple its size in case the `max_num_segments` parameter is set to `1`, to rewrite all segments into a new one. **Data streams and time-based indices** Force-merging is useful for managing a data stream's older backing indices and other time-based indices, particularly after a rollover. In these cases, each index only receives indexing traffic for a certain period of time. Once an index receives no more writes, its shards can be force-merged to a single segment. This can be a good idea because single-segment shards can sometimes use simpler and more efficient data structures to perform searches.
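The REST form of this operation appears in the `For example` snippet that follows; through this client the equivalent call might look like the sketch below (index name assumed, cluster details omitted):

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' }) // placeholder node, auth omitted

// Merge an older, no-longer-written backing index down to a single segment.
// wait_for_completion: false returns a task instead of blocking until the merge finishes.
await client.indices.forcemerge({
  index: '.ds-my-data-stream-2099.03.07-000001',
  max_num_segments: 1,
  wait_for_completion: false
})
```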
For example: ``` POST /.ds-my-data-stream-2099.03.07-000001/_forcemerge?max_num_segments=1 ``` - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-forcemerge | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-indices-forcemerge | Elasticsearch API documentation} */ async forcemerge (this: That, params?: T.IndicesForcemergeRequest, options?: TransportRequestOptionsWithOutMeta): Promise async forcemerge (this: That, params?: T.IndicesForcemergeRequest, options?: TransportRequestOptionsWithMeta): Promise> async forcemerge (this: That, params?: T.IndicesForcemergeRequest, options?: TransportRequestOptions): Promise async forcemerge (this: That, params?: T.IndicesForcemergeRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] + const { + path: acceptedPath + } = this.acceptedParams['indices.forcemerge'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1202,13 +2134,16 @@ export default class Indices { /** * Get index information. Get information about one or more indices. For data streams, the API returns information about the stream’s backing indices. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-indices-get | Elasticsearch API documentation} */ async get (this: That, params: T.IndicesGetRequest, options?: TransportRequestOptionsWithOutMeta): Promise async get (this: That, params: T.IndicesGetRequest, options?: TransportRequestOptionsWithMeta): Promise> async get (this: That, params: T.IndicesGetRequest, options?: TransportRequestOptions): Promise async get (this: That, params: T.IndicesGetRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] + const { + path: acceptedPath + } = this.acceptedParams['indices.get'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1244,13 +2179,16 @@ export default class Indices { /** * Get aliases. Retrieves information for one or more data stream or index aliases. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-alias | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-indices-get-alias | Elasticsearch API documentation} */ async getAlias (this: That, params?: T.IndicesGetAliasRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getAlias (this: That, params?: T.IndicesGetAliasRequest, options?: TransportRequestOptionsWithMeta): Promise> async getAlias (this: That, params?: T.IndicesGetAliasRequest, options?: TransportRequestOptions): Promise async getAlias (this: That, params?: T.IndicesGetAliasRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name', 'index'] + const { + path: acceptedPath + } = this.acceptedParams['indices.get_alias'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1301,13 +2239,16 @@ export default class Indices { /** * Get data stream lifecycles. Get the data stream lifecycle configuration of one or more data streams. 
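A brief sketch, assuming a data stream named `my-data-stream`, of reading its lifecycle configuration including defaults:

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' }) // placeholder node, auth omitted

// Read the lifecycle configuration of an assumed data stream, including defaults.
const lifecycle = await client.indices.getDataLifecycle({
  name: 'my-data-stream',
  include_defaults: true
})
console.log(lifecycle.data_streams)
```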
- * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-data-lifecycle | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-indices-get-data-lifecycle | Elasticsearch API documentation} */ async getDataLifecycle (this: That, params: T.IndicesGetDataLifecycleRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getDataLifecycle (this: That, params: T.IndicesGetDataLifecycleRequest, options?: TransportRequestOptionsWithMeta): Promise> async getDataLifecycle (this: That, params: T.IndicesGetDataLifecycleRequest, options?: TransportRequestOptions): Promise async getDataLifecycle (this: That, params: T.IndicesGetDataLifecycleRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] + const { + path: acceptedPath + } = this.acceptedParams['indices.get_data_lifecycle'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1343,13 +2284,16 @@ export default class Indices { /** * Get data stream lifecycle stats. Get statistics about the data streams that are managed by a data stream lifecycle. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-data-lifecycle-stats | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-indices-get-data-lifecycle-stats | Elasticsearch API documentation} */ async getDataLifecycleStats (this: That, params?: T.IndicesGetDataLifecycleStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getDataLifecycleStats (this: That, params?: T.IndicesGetDataLifecycleStatsRequest, options?: TransportRequestOptionsWithMeta): Promise> async getDataLifecycleStats (this: That, params?: T.IndicesGetDataLifecycleStatsRequest, options?: TransportRequestOptions): Promise async getDataLifecycleStats (this: That, params?: T.IndicesGetDataLifecycleStatsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] + const { + path: acceptedPath + } = this.acceptedParams['indices.get_data_lifecycle_stats'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1383,13 +2327,16 @@ export default class Indices { /** * Get data streams. Get information about one or more data streams. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-data-stream | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-indices-get-data-stream | Elasticsearch API documentation} */ async getDataStream (this: That, params?: T.IndicesGetDataStreamRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getDataStream (this: That, params?: T.IndicesGetDataStreamRequest, options?: TransportRequestOptionsWithMeta): Promise> async getDataStream (this: That, params?: T.IndicesGetDataStreamRequest, options?: TransportRequestOptions): Promise async getDataStream (this: That, params?: T.IndicesGetDataStreamRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] + const { + path: acceptedPath + } = this.acceptedParams['indices.get_data_stream'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -1433,13 +2380,16 @@ export default class Indices { /** * Get mapping definitions. Retrieves mapping definitions for one or more fields. For data streams, the API retrieves field mappings for the stream’s backing indices. This API is useful if you don't need a complete mapping or if an index mapping contains a large number of fields. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-mapping | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-indices-get-mapping | Elasticsearch API documentation} */ async getFieldMapping (this: That, params: T.IndicesGetFieldMappingRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getFieldMapping (this: That, params: T.IndicesGetFieldMappingRequest, options?: TransportRequestOptionsWithMeta): Promise> async getFieldMapping (this: That, params: T.IndicesGetFieldMappingRequest, options?: TransportRequestOptions): Promise async getFieldMapping (this: That, params: T.IndicesGetFieldMappingRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['fields', 'index'] + const { + path: acceptedPath + } = this.acceptedParams['indices.get_field_mapping'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1483,13 +2433,16 @@ export default class Indices { /** * Get index templates. Get information about one or more index templates. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-index-template | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-indices-get-index-template | Elasticsearch API documentation} */ async getIndexTemplate (this: That, params?: T.IndicesGetIndexTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getIndexTemplate (this: That, params?: T.IndicesGetIndexTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise> async getIndexTemplate (this: That, params?: T.IndicesGetIndexTemplateRequest, options?: TransportRequestOptions): Promise async getIndexTemplate (this: That, params?: T.IndicesGetIndexTemplateRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] + const { + path: acceptedPath + } = this.acceptedParams['indices.get_index_template'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1533,13 +2486,16 @@ export default class Indices { /** * Get mapping definitions. For data streams, the API retrieves mappings for the stream’s backing indices. 
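A sketch combining the two mapping reads described above, under the assumption of an index named `my-index` with a `title` field:

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' }) // placeholder node, auth omitted

// Full mapping for an assumed index, plus a single field via the field-mapping variant.
const mapping = await client.indices.getMapping({ index: 'my-index' })
const titleField = await client.indices.getFieldMapping({ index: 'my-index', fields: 'title' })
console.log(mapping, titleField)
```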
- * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-mapping | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-indices-get-mapping | Elasticsearch API documentation} */ async getMapping (this: That, params?: T.IndicesGetMappingRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getMapping (this: That, params?: T.IndicesGetMappingRequest, options?: TransportRequestOptionsWithMeta): Promise> async getMapping (this: That, params?: T.IndicesGetMappingRequest, options?: TransportRequestOptions): Promise async getMapping (this: That, params?: T.IndicesGetMappingRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] + const { + path: acceptedPath + } = this.acceptedParams['indices.get_mapping'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1583,13 +2539,16 @@ export default class Indices { /** * Get the migration reindexing status. Get the status of a migration reindex attempt for a data stream or index. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/migrate-data-stream.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/group/endpoint-migration | Elasticsearch API documentation} */ async getMigrateReindexStatus (this: That, params: T.IndicesGetMigrateReindexStatusRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getMigrateReindexStatus (this: That, params: T.IndicesGetMigrateReindexStatusRequest, options?: TransportRequestOptionsWithMeta): Promise> async getMigrateReindexStatus (this: That, params: T.IndicesGetMigrateReindexStatusRequest, options?: TransportRequestOptions): Promise async getMigrateReindexStatus (this: That, params: T.IndicesGetMigrateReindexStatusRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] + const { + path: acceptedPath + } = this.acceptedParams['indices.get_migrate_reindex_status'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1625,13 +2584,16 @@ export default class Indices { /** * Get index settings. Get setting information for one or more indices. For data streams, it returns setting information for the stream's backing indices. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-settings | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-indices-get-settings | Elasticsearch API documentation} */ async getSettings (this: That, params?: T.IndicesGetSettingsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getSettings (this: That, params?: T.IndicesGetSettingsRequest, options?: TransportRequestOptionsWithMeta): Promise> async getSettings (this: That, params?: T.IndicesGetSettingsRequest, options?: TransportRequestOptions): Promise async getSettings (this: That, params?: T.IndicesGetSettingsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index', 'name'] + const { + path: acceptedPath + } = this.acceptedParams['indices.get_settings'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1682,13 +2644,16 @@ export default class Indices { /** * Get index templates. 
Get information about one or more index templates. IMPORTANT: This documentation is about legacy index templates, which are deprecated and will be replaced by the composable templates introduced in Elasticsearch 7.8. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-template | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-indices-get-template | Elasticsearch API documentation} */ async getTemplate (this: That, params?: T.IndicesGetTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getTemplate (this: That, params?: T.IndicesGetTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise> async getTemplate (this: That, params?: T.IndicesGetTemplateRequest, options?: TransportRequestOptions): Promise async getTemplate (this: That, params?: T.IndicesGetTemplateRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] + const { + path: acceptedPath + } = this.acceptedParams['indices.get_template'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1732,14 +2697,18 @@ export default class Indices { /** * Reindex legacy backing indices. Reindex all legacy backing indices for a data stream. This operation occurs in a persistent task. The persistent task ID is returned immediately and the reindexing work is completed in that task. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/migrate-data-stream.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-indices-migrate-reindex | Elasticsearch API documentation} */ async migrateReindex (this: That, params: T.IndicesMigrateReindexRequest, options?: TransportRequestOptionsWithOutMeta): Promise async migrateReindex (this: That, params: T.IndicesMigrateReindexRequest, options?: TransportRequestOptionsWithMeta): Promise> async migrateReindex (this: That, params: T.IndicesMigrateReindexRequest, options?: TransportRequestOptions): Promise async migrateReindex (this: That, params: T.IndicesMigrateReindexRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const acceptedBody: string[] = ['reindex'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['indices.migrate_reindex'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1751,8 +2720,14 @@ export default class Indices { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -1766,13 +2741,16 @@ export default class Indices { /** * Convert an index alias to a data stream. Converts an index alias to a data stream. You must have a matching index template that is data stream enabled. The alias must meet the following criteria: The alias must have a write index; All indices for the alias must have a `@timestamp` field mapping of a `date` or `date_nanos` field type; The alias must not have any filters; The alias must not use custom routing. 
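For endpoints that accept a request body (such as `migrateReindex` above), the fall-through branch also changes: where the old code pushed every unrecognized top-level key onto the query string, the new code sends it to the query string only if it appears in the endpoint's accepted query list or in `commonQueryParams` (assumed here to be a module-level list of parameters shared by all endpoints, since its definition is outside these hunks), and otherwise places it in the body. A minimal standalone sketch of that branch, with names assumed from the diff:

```ts
// Standalone sketch of the new routing branch shown in the diff (simplified).
function routeParam (
  key: string,
  value: unknown,
  acceptedQuery: string[],
  commonQueryParams: string[],
  querystring: Record<string, unknown>,
  body: Record<string, unknown>
): void {
  if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
    // Known query parameter for this endpoint, or one shared by all endpoints:
    // keep it in the query string, as before.
    querystring[key] = value
  } else {
    // Anything else is now treated as a body field instead of
    // silently becoming a query-string parameter.
    body[key] = value
  }
}
```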
If successful, the request removes the alias and creates a data stream with the same name. The indices for the alias become hidden backing indices for the stream. The write index for the alias becomes the write index for the stream. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/data-streams.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-indices-migrate-to-data-stream | Elasticsearch API documentation} */ async migrateToDataStream (this: That, params: T.IndicesMigrateToDataStreamRequest, options?: TransportRequestOptionsWithOutMeta): Promise async migrateToDataStream (this: That, params: T.IndicesMigrateToDataStreamRequest, options?: TransportRequestOptionsWithMeta): Promise> async migrateToDataStream (this: That, params: T.IndicesMigrateToDataStreamRequest, options?: TransportRequestOptions): Promise async migrateToDataStream (this: That, params: T.IndicesMigrateToDataStreamRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] + const { + path: acceptedPath + } = this.acceptedParams['indices.migrate_to_data_stream'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1808,14 +2786,18 @@ export default class Indices { /** * Update data streams. Performs one or more data stream modification actions in a single atomic operation. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/data-streams.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-indices-modify-data-stream | Elasticsearch API documentation} */ async modifyDataStream (this: That, params: T.IndicesModifyDataStreamRequest, options?: TransportRequestOptionsWithOutMeta): Promise async modifyDataStream (this: That, params: T.IndicesModifyDataStreamRequest, options?: TransportRequestOptionsWithMeta): Promise> async modifyDataStream (this: That, params: T.IndicesModifyDataStreamRequest, options?: TransportRequestOptions): Promise async modifyDataStream (this: That, params: T.IndicesModifyDataStreamRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const acceptedBody: string[] = ['actions'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['indices.modify_data_stream'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1837,8 +2819,14 @@ export default class Indices { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -1852,13 +2840,16 @@ export default class Indices { /** * Open a closed index. For data streams, the API opens any closed backing indices. A closed index is blocked for read/write operations and does not allow all operations that opened indices allow. It is not possible to index documents or to search for documents in a closed index. This allows closed indices to not have to maintain internal data structures for indexing or searching documents, resulting in a smaller overhead on the cluster. 
When opening or closing an index, the master is responsible for restarting the index shards to reflect the new state of the index. The shards will then go through the normal recovery process. The data of opened or closed indices is automatically replicated by the cluster to ensure that enough shard copies are safely kept around at all times. You can open and close multiple indices. An error is thrown if the request explicitly refers to a missing index. This behavior can be turned off by using the `ignore_unavailable=true` parameter. By default, you must explicitly name the indices you are opening or closing. To open or close indices with `_all`, `*`, or other wildcard expressions, change the `action.destructive_requires_name` setting to `false`. This setting can also be changed with the cluster update settings API. Closed indices consume a significant amount of disk-space which can cause problems in managed environments. Closing indices can be turned off with the cluster settings API by setting `cluster.indices.close.enable` to `false`. Because opening or closing an index allocates its shards, the `wait_for_active_shards` setting on index creation applies to the `_open` and `_close` index actions as well. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-open | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-indices-open | Elasticsearch API documentation} */ async open (this: That, params: T.IndicesOpenRequest, options?: TransportRequestOptionsWithOutMeta): Promise async open (this: That, params: T.IndicesOpenRequest, options?: TransportRequestOptionsWithMeta): Promise> async open (this: That, params: T.IndicesOpenRequest, options?: TransportRequestOptions): Promise async open (this: That, params: T.IndicesOpenRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] + const { + path: acceptedPath + } = this.acceptedParams['indices.open'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1894,13 +2885,16 @@ export default class Indices { /** * Promote a data stream. Promote a data stream from a replicated data stream managed by cross-cluster replication (CCR) to a regular data stream. With CCR auto following, a data stream from a remote cluster can be replicated to the local cluster. These data streams can't be rolled over in the local cluster. These replicated data streams roll over only if the upstream data stream rolls over. In the event that the remote cluster is no longer available, the data stream in the local cluster can be promoted to a regular data stream, which allows these data streams to be rolled over in the local cluster. NOTE: When promoting a data stream, ensure the local cluster has a data stream enabled index template that matches the data stream. If this is missing, the data stream will not be able to roll over until a matching index template is created. This will affect the lifecycle management of the data stream and interfere with the data stream size and retention. 
- * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/data-streams.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-indices-promote-data-stream | Elasticsearch API documentation} */ async promoteDataStream (this: That, params: T.IndicesPromoteDataStreamRequest, options?: TransportRequestOptionsWithOutMeta): Promise async promoteDataStream (this: That, params: T.IndicesPromoteDataStreamRequest, options?: TransportRequestOptionsWithMeta): Promise> async promoteDataStream (this: That, params: T.IndicesPromoteDataStreamRequest, options?: TransportRequestOptions): Promise async promoteDataStream (this: That, params: T.IndicesPromoteDataStreamRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] + const { + path: acceptedPath + } = this.acceptedParams['indices.promote_data_stream'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1936,14 +2930,18 @@ export default class Indices { /** * Create or update an alias. Adds a data stream or index to an alias. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-aliases.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-indices-put-alias | Elasticsearch API documentation} */ async putAlias (this: That, params: T.IndicesPutAliasRequest, options?: TransportRequestOptionsWithOutMeta): Promise async putAlias (this: That, params: T.IndicesPutAliasRequest, options?: TransportRequestOptionsWithMeta): Promise> async putAlias (this: That, params: T.IndicesPutAliasRequest, options?: TransportRequestOptions): Promise async putAlias (this: That, params: T.IndicesPutAliasRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index', 'name'] - const acceptedBody: string[] = ['filter', 'index_routing', 'is_write_index', 'routing', 'search_routing'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['indices.put_alias'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1965,8 +2963,14 @@ export default class Indices { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -1991,14 +2995,18 @@ export default class Indices { /** * Update data stream lifecycles. Update the data stream lifecycle of the specified data streams. 
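A hedged usage sketch for `putAlias` after the refactor: the call shape is unchanged, and body fields from the accepted list above (such as `is_write_index` and `filter`) are still passed at the top level. The node URL, index, and alias names are placeholders.

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

// `index` and `name` resolve to the URL path; `is_write_index` and `filter`
// are in the endpoint's accepted body list, so they are sent in the request body.
await client.indices.putAlias({
  index: 'my-index-000001',
  name: 'my-alias',
  is_write_index: true,
  filter: { term: { 'user.id': 'kimchy' } }
})
```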
- * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/data-streams-put-lifecycle.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-indices-put-data-lifecycle | Elasticsearch API documentation} */ async putDataLifecycle (this: That, params: T.IndicesPutDataLifecycleRequest, options?: TransportRequestOptionsWithOutMeta): Promise async putDataLifecycle (this: That, params: T.IndicesPutDataLifecycleRequest, options?: TransportRequestOptionsWithMeta): Promise> async putDataLifecycle (this: That, params: T.IndicesPutDataLifecycleRequest, options?: TransportRequestOptions): Promise async putDataLifecycle (this: That, params: T.IndicesPutDataLifecycleRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] - const acceptedBody: string[] = ['data_retention', 'downsampling', 'enabled'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['indices.put_data_lifecycle'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2020,8 +3028,14 @@ export default class Indices { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -2038,14 +3052,18 @@ export default class Indices { /** * Create or update an index template. Index templates define settings, mappings, and aliases that can be applied automatically to new indices. Elasticsearch applies templates to new indices based on an wildcard pattern that matches the index name. Index templates are applied during data stream or index creation. For data streams, these settings and mappings are applied when the stream's backing indices are created. Settings and mappings specified in a create index API request override any settings or mappings specified in an index template. Changes to index templates do not affect existing indices, including the existing backing indices of a data stream. You can use C-style `/* *\/` block comments in index templates. You can include comments anywhere in the request body, except before the opening curly bracket. **Multiple matching templates** If multiple index templates match the name of a new index or data stream, the template with the highest priority is used. Multiple templates with overlapping index patterns at the same priority are not allowed and an error will be thrown when attempting to create a template matching an existing index template at identical priorities. **Composing aliases, mappings, and settings** When multiple component templates are specified in the `composed_of` field for an index template, they are merged in the order specified, meaning that later component templates override earlier component templates. Any mappings, settings, or aliases from the parent index template are merged in next. Finally, any configuration on the index request itself is merged. Mapping definitions are merged recursively, which means that later mapping components can introduce new field mappings and update the mapping configuration. If a field mapping is already contained in an earlier component, its definition will be completely overwritten by the later one. 
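Likewise for `putDataLifecycle`, a usage sketch with placeholder names; `data_retention` comes from the accepted body list shown above.

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

// `name` is the path parameter; `data_retention` is an accepted body field,
// so it is serialized into the JSON body of the PUT request.
await client.indices.putDataLifecycle({
  name: 'my-data-stream',
  data_retention: '7d'
})
```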
This recursive merging strategy applies not only to field mappings, but also root options like `dynamic_templates` and `meta`. If an earlier component contains a `dynamic_templates` block, then by default new `dynamic_templates` entries are appended onto the end. If an entry already exists with the same key, then it is overwritten by the new definition. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-put-template.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-indices-put-index-template | Elasticsearch API documentation} */ async putIndexTemplate (this: That, params: T.IndicesPutIndexTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise async putIndexTemplate (this: That, params: T.IndicesPutIndexTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise> async putIndexTemplate (this: That, params: T.IndicesPutIndexTemplateRequest, options?: TransportRequestOptions): Promise async putIndexTemplate (this: That, params: T.IndicesPutIndexTemplateRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] - const acceptedBody: string[] = ['index_patterns', 'composed_of', 'template', 'data_stream', 'priority', 'version', '_meta', 'allow_auto_create', 'ignore_missing_component_templates', 'deprecated'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['indices.put_index_template'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2067,8 +3085,14 @@ export default class Indices { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -2085,14 +3109,18 @@ export default class Indices { /** * Update field mappings. Add new fields to an existing data stream or index. You can also use this API to change the search settings of existing fields and add new properties to existing object fields. For data streams, these changes are applied to all backing indices by default. **Add multi-fields to an existing field** Multi-fields let you index the same field in different ways. You can use this API to update the fields mapping parameter and enable multi-fields for an existing field. WARNING: If an index (or data stream) contains documents when you add a multi-field, those documents will not have values for the new multi-field. You can populate the new multi-field with the update by query API. **Change supported mapping parameters for an existing field** The documentation for each mapping parameter indicates whether you can update it for an existing field using this API. For example, you can use the update mapping API to update the `ignore_above` parameter. **Change the mapping of an existing field** Except for supported mapping parameters, you can't change the mapping or field type of an existing field. Changing an existing field could invalidate data that's already indexed. If you need to change the mapping of a field in a data stream's backing indices, refer to documentation about modifying data streams. 
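A usage sketch for `putIndexTemplate` with placeholder values; the top-level fields used here (`index_patterns`, `data_stream`, `priority`, `template`) all appear in the accepted body list above.

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

await client.indices.putIndexTemplate({
  name: 'my-logs-template',     // path parameter
  index_patterns: ['logs-*'],   // body fields from the accepted body list above
  data_stream: {},
  priority: 200,
  template: {
    settings: { number_of_shards: 1 }
  }
})
```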
If you need to change the mapping of a field in other indices, create a new index with the correct mapping and reindex your data into that index. **Rename a field** Renaming a field would invalidate data already indexed under the old field name. Instead, add an alias field to create an alternate field name. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-put-mapping | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-indices-put-mapping | Elasticsearch API documentation} */ async putMapping (this: That, params: T.IndicesPutMappingRequest, options?: TransportRequestOptionsWithOutMeta): Promise async putMapping (this: That, params: T.IndicesPutMappingRequest, options?: TransportRequestOptionsWithMeta): Promise> async putMapping (this: That, params: T.IndicesPutMappingRequest, options?: TransportRequestOptions): Promise async putMapping (this: That, params: T.IndicesPutMappingRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] - const acceptedBody: string[] = ['date_detection', 'dynamic', 'dynamic_date_formats', 'dynamic_templates', '_field_names', '_meta', 'numeric_detection', 'properties', '_routing', '_source', 'runtime'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['indices.put_mapping'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2114,8 +3142,14 @@ export default class Indices { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -2132,14 +3166,18 @@ export default class Indices { /** * Update index settings. Changes dynamic index settings in real time. For data streams, index setting changes are applied to all backing indices by default. To revert a setting to the default value, use a null value. The list of per-index settings that can be updated dynamically on live indices can be found in index module documentation. To preserve existing settings from being updated, set the `preserve_existing` parameter to `true`. NOTE: You can only define new analyzers on closed indices. To add an analyzer, you must close the index, define the analyzer, and reopen the index. You cannot close the write index of a data stream. To update the analyzer for a data stream's write index and future backing indices, update the analyzer in the index template used by the stream. Then roll over the data stream to apply the new analyzer to the stream's write index and future backing indices. This affects searches and any new data added to the stream after the rollover. However, it does not affect the data stream's backing indices or their existing data. To change the analyzer for existing backing indices, you must create a new data stream and reindex your data into it. 
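A sketch of the multi-field scenario described in the `putMapping` comment above, with placeholder index and field names.

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

// Adds a keyword multi-field to an existing text field, as described above.
await client.indices.putMapping({
  index: 'my-index-000001',   // path parameter
  properties: {               // body field from the accepted body list
    city: {
      type: 'text',
      fields: {
        raw: { type: 'keyword' }
      }
    }
  }
})
```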
- * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-put-settings | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-indices-put-settings | Elasticsearch API documentation} */ async putSettings (this: That, params: T.IndicesPutSettingsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async putSettings (this: That, params: T.IndicesPutSettingsRequest, options?: TransportRequestOptionsWithMeta): Promise> async putSettings (this: That, params: T.IndicesPutSettingsRequest, options?: TransportRequestOptions): Promise async putSettings (this: That, params: T.IndicesPutSettingsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] - const acceptedBody: string[] = ['settings'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['indices.put_settings'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2151,8 +3189,14 @@ export default class Indices { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -2176,14 +3220,18 @@ export default class Indices { /** * Create or update an index template. Index templates define settings, mappings, and aliases that can be applied automatically to new indices. Elasticsearch applies templates to new indices based on an index pattern that matches the index name. IMPORTANT: This documentation is about legacy index templates, which are deprecated and will be replaced by the composable templates introduced in Elasticsearch 7.8. Composable templates always take precedence over legacy templates. If no composable template matches a new index, matching legacy templates are applied according to their order. Index templates are only applied during index creation. Changes to index templates do not affect existing indices. Settings and mappings specified in create index API requests override any settings or mappings specified in an index template. You can use C-style `/* *\/` block comments in index templates. You can include comments anywhere in the request body, except before the opening curly bracket. **Indices matching multiple templates** Multiple index templates can potentially match an index, in this case, both the settings and mappings are merged into the final configuration of the index. The order of the merging can be controlled using the order parameter, with lower order being applied first, and higher orders overriding them. NOTE: Multiple matching templates with the same order value will result in a non-deterministic merging order. 
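For `putSettings`, the only accepted body field is `settings`; a usage sketch with a placeholder index and an illustrative dynamic setting:

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

// `index` resolves to the path; `settings` is sent as the request body.
await client.indices.putSettings({
  index: 'my-index-000001',
  settings: {
    index: {
      refresh_interval: '30s'
    }
  }
})
```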
- * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-put-template | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-indices-put-template | Elasticsearch API documentation} */ async putTemplate (this: That, params: T.IndicesPutTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise async putTemplate (this: That, params: T.IndicesPutTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise> async putTemplate (this: That, params: T.IndicesPutTemplateRequest, options?: TransportRequestOptions): Promise async putTemplate (this: That, params: T.IndicesPutTemplateRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] - const acceptedBody: string[] = ['aliases', 'index_patterns', 'mappings', 'order', 'settings', 'version'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['indices.put_template'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2205,8 +3253,14 @@ export default class Indices { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -2223,13 +3277,16 @@ export default class Indices { /** * Get index recovery information. Get information about ongoing and completed shard recoveries for one or more indices. For data streams, the API returns information for the stream's backing indices. All recoveries, whether ongoing or complete, are kept in the cluster state and may be reported on at any time. Shard recovery is the process of initializing a shard copy, such as restoring a primary shard from a snapshot or creating a replica shard from a primary shard. When a shard recovery completes, the recovered shard is available for search and indexing. Recovery automatically occurs during the following processes: * When creating an index for the first time. * When a node rejoins the cluster and starts up any missing primary shard copies using the data that it holds in its data path. * Creation of new replica shard copies from the primary. * Relocation of a shard copy to a different node in the same cluster. * A snapshot restore operation. * A clone, shrink, or split operation. You can determine the cause of a shard recovery using the recovery or cat recovery APIs. The index recovery API reports information about completed recoveries only for shard copies that currently exist in the cluster. It only reports the last recovery for each shard copy and does not report historical information about earlier recoveries, nor does it report information about the recoveries of shard copies that no longer exist. This means that if a shard copy completes a recovery and then Elasticsearch relocates it onto a different node then the information about the original recovery will not be shown in the recovery API. 
- * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-recovery | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-indices-recovery | Elasticsearch API documentation} */ async recovery (this: That, params?: T.IndicesRecoveryRequest, options?: TransportRequestOptionsWithOutMeta): Promise async recovery (this: That, params?: T.IndicesRecoveryRequest, options?: TransportRequestOptionsWithMeta): Promise> async recovery (this: That, params?: T.IndicesRecoveryRequest, options?: TransportRequestOptions): Promise async recovery (this: That, params?: T.IndicesRecoveryRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] + const { + path: acceptedPath + } = this.acceptedParams['indices.recovery'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2273,13 +3330,16 @@ export default class Indices { /** * Refresh an index. A refresh makes recent operations performed on one or more indices available for search. For data streams, the API runs the refresh operation on the stream’s backing indices. By default, Elasticsearch periodically refreshes indices every second, but only on indices that have received one search request or more in the last 30 seconds. You can change this default interval with the `index.refresh_interval` setting. Refresh requests are synchronous and do not return a response until the refresh operation completes. Refreshes are resource-intensive. To ensure good cluster performance, it's recommended to wait for Elasticsearch's periodic refresh rather than performing an explicit refresh when possible. If your application workflow indexes documents and then runs a search to retrieve the indexed document, it's recommended to use the index API's `refresh=wait_for` query parameter option. This option ensures the indexing operation waits for a periodic refresh before running the search. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-refresh | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-indices-refresh | Elasticsearch API documentation} */ async refresh (this: That, params?: T.IndicesRefreshRequest, options?: TransportRequestOptionsWithOutMeta): Promise async refresh (this: That, params?: T.IndicesRefreshRequest, options?: TransportRequestOptionsWithMeta): Promise> async refresh (this: That, params?: T.IndicesRefreshRequest, options?: TransportRequestOptions): Promise async refresh (this: That, params?: T.IndicesRefreshRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] + const { + path: acceptedPath + } = this.acceptedParams['indices.refresh'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2323,13 +3383,16 @@ export default class Indices { /** * Reload search analyzers. Reload an index's search analyzers and their resources. For data streams, the API reloads search analyzers and resources for the stream's backing indices. IMPORTANT: After reloading the search analyzers you should clear the request cache to make sure it doesn't contain responses derived from the previous versions of the analyzer. 
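The `refresh` comment above recommends the index API's `refresh=wait_for` option over explicit refreshes; a hedged sketch of that workflow (index a document, then search for it), with placeholder names:

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

// `refresh: 'wait_for'` makes the indexing call wait for the next periodic refresh,
// so the follow-up search can see the document without forcing an explicit refresh.
await client.index({
  index: 'my-index-000001',
  refresh: 'wait_for',
  document: { message: 'hello' }
})

const result = await client.search({
  index: 'my-index-000001',
  query: { match: { message: 'hello' } }
})
console.log(result.hits.hits)
```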
You can use the reload search analyzers API to pick up changes to synonym files used in the `synonym_graph` or `synonym` token filter of a search analyzer. To be eligible, the token filter must have an `updateable` flag of `true` and only be used in search analyzers. NOTE: This API does not perform a reload for each shard of an index. Instead, it performs a reload for each node containing index shards. As a result, the total shard count returned by the API can differ from the number of index shards. Because reloading affects every node with an index shard, it is important to update the synonym file on every data node in the cluster--including nodes that don't contain a shard replica--before using this API. This ensures the synonym file is updated everywhere in the cluster in case shards are relocated in the future. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-reload-search-analyzers | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-indices-reload-search-analyzers | Elasticsearch API documentation} */ async reloadSearchAnalyzers (this: That, params: T.IndicesReloadSearchAnalyzersRequest, options?: TransportRequestOptionsWithOutMeta): Promise async reloadSearchAnalyzers (this: That, params: T.IndicesReloadSearchAnalyzersRequest, options?: TransportRequestOptionsWithMeta): Promise> async reloadSearchAnalyzers (this: That, params: T.IndicesReloadSearchAnalyzersRequest, options?: TransportRequestOptions): Promise async reloadSearchAnalyzers (this: That, params: T.IndicesReloadSearchAnalyzersRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] + const { + path: acceptedPath + } = this.acceptedParams['indices.reload_search_analyzers'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2365,13 +3428,16 @@ export default class Indices { /** * Resolve the cluster. Resolve the specified index expressions to return information about each cluster, including the local "querying" cluster, if included. If no index expression is provided, the API will return information about all the remote clusters that are configured on the querying cluster. This endpoint is useful before doing a cross-cluster search in order to determine which remote clusters should be included in a search. You use the same index expression with this endpoint as you would for cross-cluster search. Index and cluster exclusions are also supported with this endpoint. For each cluster in the index expression, information is returned about: * Whether the querying ("local") cluster is currently connected to each remote cluster specified in the index expression. Note that this endpoint actively attempts to contact the remote clusters, unlike the `remote/info` endpoint. * Whether each remote cluster is configured with `skip_unavailable` as `true` or `false`. * Whether there are any indices, aliases, or data streams on that cluster that match the index expression. * Whether the search is likely to have errors returned when you do the cross-cluster search (including any authorization errors if you do not have permission to query the index). * Cluster version information, including the Elasticsearch server version. For example, `GET /_resolve/cluster/my-index-*,cluster*:my-index-*` returns information about the local cluster and all remotely configured clusters that start with the alias `cluster*`. 
Each cluster returns information about whether it has any indices, aliases or data streams that match `my-index-*`. ## Note on backwards compatibility The ability to query without an index expression was added in version 8.18, so when querying remote clusters older than that, the local cluster will send the index expression `dummy*` to those remote clusters. Thus, if an errors occur, you may see a reference to that index expression even though you didn't request it. If it causes a problem, you can instead include an index expression like `*:*` to bypass the issue. ## Advantages of using this endpoint before a cross-cluster search You may want to exclude a cluster or index from a search when: * A remote cluster is not currently connected and is configured with `skip_unavailable=false`. Running a cross-cluster search under those conditions will cause the entire search to fail. * A cluster has no matching indices, aliases or data streams for the index expression (or your user does not have permissions to search them). For example, suppose your index expression is `logs*,remote1:logs*` and the remote1 cluster has no indices, aliases or data streams that match `logs*`. In that case, that cluster will return no results from that cluster if you include it in a cross-cluster search. * The index expression (combined with any query parameters you specify) will likely cause an exception to be thrown when you do the search. In these cases, the "error" field in the `_resolve/cluster` response will be present. (This is also where security/permission errors will be shown.) * A remote cluster is an older version that does not support the feature you want to use in your search. ## Test availability of remote clusters The `remote/info` endpoint is commonly used to test whether the "local" cluster (the cluster being queried) is connected to its remote clusters, but it does not necessarily reflect whether the remote cluster is available or not. The remote cluster may be available, while the local cluster is not currently connected to it. You can use the `_resolve/cluster` API to attempt to reconnect to remote clusters. For example with `GET _resolve/cluster` or `GET _resolve/cluster/*:*`. The `connected` field in the response will indicate whether it was successful. If a connection was (re-)established, this will also cause the `remote/info` endpoint to now indicate a connected status. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-resolve-cluster | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-indices-resolve-cluster | Elasticsearch API documentation} */ async resolveCluster (this: That, params?: T.IndicesResolveClusterRequest, options?: TransportRequestOptionsWithOutMeta): Promise async resolveCluster (this: That, params?: T.IndicesResolveClusterRequest, options?: TransportRequestOptionsWithMeta): Promise> async resolveCluster (this: That, params?: T.IndicesResolveClusterRequest, options?: TransportRequestOptions): Promise async resolveCluster (this: That, params?: T.IndicesResolveClusterRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] + const { + path: acceptedPath + } = this.acceptedParams['indices.resolve_cluster'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2415,13 +3481,16 @@ export default class Indices { /** * Resolve indices. 
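A usage sketch for `resolveCluster`, mirroring the `GET /_resolve/cluster/my-index-*,cluster*:my-index-*` example quoted in the comment above; the node URL is a placeholder.

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

// `name` is the index expression; each cluster in the response reports whether it is
// connected and whether it has matching indices, aliases, or data streams.
const info = await client.indices.resolveCluster({
  name: 'my-index-*,cluster*:my-index-*'
})
console.log(info)
```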
Resolve the names and/or index patterns for indices, aliases, and data streams. Multiple patterns and remote clusters are supported. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-resolve-index | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-indices-resolve-index | Elasticsearch API documentation} */ async resolveIndex (this: That, params: T.IndicesResolveIndexRequest, options?: TransportRequestOptionsWithOutMeta): Promise async resolveIndex (this: That, params: T.IndicesResolveIndexRequest, options?: TransportRequestOptionsWithMeta): Promise> async resolveIndex (this: That, params: T.IndicesResolveIndexRequest, options?: TransportRequestOptions): Promise async resolveIndex (this: That, params: T.IndicesResolveIndexRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] + const { + path: acceptedPath + } = this.acceptedParams['indices.resolve_index'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2457,14 +3526,18 @@ export default class Indices { /** * Roll over to a new index. TIP: It is recommended to use the index lifecycle rollover action to automate rollovers. The rollover API creates a new index for a data stream or index alias. The API behavior depends on the rollover target. **Roll over a data stream** If you roll over a data stream, the API creates a new write index for the stream. The stream's previous write index becomes a regular backing index. A rollover also increments the data stream's generation. **Roll over an index alias with a write index** TIP: Prior to Elasticsearch 7.9, you'd typically use an index alias with a write index to manage time series data. Data streams replace this functionality, require less maintenance, and automatically integrate with data tiers. If an index alias points to multiple indices, one of the indices must be a write index. The rollover API creates a new write index for the alias with `is_write_index` set to `true`. The API also `sets is_write_index` to `false` for the previous write index. **Roll over an index alias with one index** If you roll over an index alias that points to only one index, the API creates a new index for the alias and removes the original index from the alias. NOTE: A rollover creates a new index and is subject to the `wait_for_active_shards` setting. **Increment index names for an alias** When you roll over an index alias, you can specify a name for the new index. If you don't specify a name and the current index ends with `-` and a number, such as `my-index-000001` or `my-index-3`, the new index name increments that number. For example, if you roll over an alias with a current index of `my-index-000001`, the rollover creates a new index named `my-index-000002`. This number is always six characters and zero-padded, regardless of the previous index's name. If you use an index alias for time series data, you can use date math in the index name to track the rollover date. For example, you can create an alias that points to an index named ``. If you create the index on May 6, 2099, the index's name is `my-index-2099.05.06-000001`. If you roll over the alias on May 7, 2099, the new index's name is `my-index-2099.05.07-000002`. 
- * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-rollover | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-indices-rollover | Elasticsearch API documentation} */ async rollover (this: That, params: T.IndicesRolloverRequest, options?: TransportRequestOptionsWithOutMeta): Promise async rollover (this: That, params: T.IndicesRolloverRequest, options?: TransportRequestOptionsWithMeta): Promise> async rollover (this: That, params: T.IndicesRolloverRequest, options?: TransportRequestOptions): Promise async rollover (this: That, params: T.IndicesRolloverRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['alias', 'new_index'] - const acceptedBody: string[] = ['aliases', 'conditions', 'mappings', 'settings'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['indices.rollover'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2486,8 +3559,14 @@ export default class Indices { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -2512,13 +3591,16 @@ export default class Indices { /** * Get index segments. Get low-level information about the Lucene segments in index shards. For data streams, the API returns information about the stream's backing indices. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-segments | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-indices-segments | Elasticsearch API documentation} */ async segments (this: That, params?: T.IndicesSegmentsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async segments (this: That, params?: T.IndicesSegmentsRequest, options?: TransportRequestOptionsWithMeta): Promise> async segments (this: That, params?: T.IndicesSegmentsRequest, options?: TransportRequestOptions): Promise async segments (this: That, params?: T.IndicesSegmentsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] + const { + path: acceptedPath + } = this.acceptedParams['indices.segments'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2562,13 +3644,16 @@ export default class Indices { /** * Get index shard stores. Get store information about replica shards in one or more indices. For data streams, the API retrieves store information for the stream's backing indices. The index shard stores API returns the following information: * The node on which each replica shard exists. * The allocation ID for each replica shard. * A unique ID for each replica shard. * Any errors encountered while opening the shard index or from an earlier failure. By default, the API returns store information only for primary shards that are unassigned or have one or more unassigned replica shards. 
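A usage sketch for `rollover`: `alias` is the path parameter and `conditions` is one of the accepted body fields listed above. The specific condition values are illustrative only.

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

// Rolls the target over only if at least one condition is met.
await client.indices.rollover({
  alias: 'my-data-stream',
  conditions: {
    max_age: '7d',
    max_docs: 1000
  }
})
```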
- * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-shard-stores | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-indices-shard-stores | Elasticsearch API documentation} */ async shardStores (this: That, params?: T.IndicesShardStoresRequest, options?: TransportRequestOptionsWithOutMeta): Promise async shardStores (this: That, params?: T.IndicesShardStoresRequest, options?: TransportRequestOptionsWithMeta): Promise> async shardStores (this: That, params?: T.IndicesShardStoresRequest, options?: TransportRequestOptions): Promise async shardStores (this: That, params?: T.IndicesShardStoresRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] + const { + path: acceptedPath + } = this.acceptedParams['indices.shard_stores'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2612,14 +3697,18 @@ export default class Indices { /** * Shrink an index. Shrink an index into a new index with fewer primary shards. Before you can shrink an index: * The index must be read-only. * A copy of every shard in the index must reside on the same node. * The index must have a green health status. To make shard allocation easier, we recommend you also remove the index's replica shards. You can later re-add replica shards as part of the shrink operation. The requested number of primary shards in the target index must be a factor of the number of shards in the source index. For example an index with 8 primary shards can be shrunk into 4, 2 or 1 primary shards or an index with 15 primary shards can be shrunk into 5, 3 or 1. If the number of shards in the index is a prime number it can only be shrunk into a single primary shard Before shrinking, a (primary or replica) copy of every shard in the index must be present on the same node. The current write index on a data stream cannot be shrunk. In order to shrink the current write index, the data stream must first be rolled over so that a new write index is created and then the previous write index can be shrunk. A shrink operation: * Creates a new target index with the same definition as the source index, but with a smaller number of primary shards. * Hard-links segments from the source index into the target index. If the file system does not support hard-linking, then all segments are copied into the new index, which is a much more time consuming process. Also if using multiple data paths, shards on different data paths require a full copy of segment files if they are not on the same disk since hardlinks do not work across disks. * Recovers the target index as though it were a closed index which had just been re-opened. Recovers shards to the `.routing.allocation.initial_recovery._id` index setting. IMPORTANT: Indices can only be shrunk if they satisfy the following requirements: * The target index must not exist. * The source index must have more primary shards than the target index. * The number of primary shards in the target index must be a factor of the number of primary shards in the source index. The source index must have more primary shards than the target index. * The index must not contain more than 2,147,483,519 documents in total across all shards that will be shrunk into a single shard on the target index as this is the maximum number of docs that can fit into a single shard. 
* The node handling the shrink process must have sufficient free disk space to accommodate a second copy of the existing index. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-shrink | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-indices-shrink | Elasticsearch API documentation} */ async shrink (this: That, params: T.IndicesShrinkRequest, options?: TransportRequestOptionsWithOutMeta): Promise async shrink (this: That, params: T.IndicesShrinkRequest, options?: TransportRequestOptionsWithMeta): Promise> async shrink (this: That, params: T.IndicesShrinkRequest, options?: TransportRequestOptions): Promise async shrink (this: That, params: T.IndicesShrinkRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index', 'target'] - const acceptedBody: string[] = ['aliases', 'settings'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['indices.shrink'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2641,8 +3730,14 @@ export default class Indices { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -2660,13 +3755,16 @@ export default class Indices { /** * Simulate an index. Get the index configuration that would be applied to the specified index from an existing index template. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-simulate-index-template | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-indices-simulate-index-template | Elasticsearch API documentation} */ async simulateIndexTemplate (this: That, params: T.IndicesSimulateIndexTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise async simulateIndexTemplate (this: That, params: T.IndicesSimulateIndexTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise> async simulateIndexTemplate (this: That, params: T.IndicesSimulateIndexTemplateRequest, options?: TransportRequestOptions): Promise async simulateIndexTemplate (this: That, params: T.IndicesSimulateIndexTemplateRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] + const { + path: acceptedPath + } = this.acceptedParams['indices.simulate_index_template'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2702,14 +3800,18 @@ export default class Indices { /** * Simulate an index template. Get the index configuration that would be applied by a particular index template. 
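A usage sketch for `shrink` with placeholder index names; `settings` is one of the two accepted body fields listed above, and the flat `index.number_of_shards` key is one common way to express it (the exact settings typing is not shown in this diff). The source index must already be read-only and green, per the constraints described above.

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

// `index` and `target` are path parameters; `settings` goes in the request body.
await client.indices.shrink({
  index: 'my-source-index',
  target: 'my-shrunken-index',
  settings: {
    'index.number_of_shards': 1
  }
})
```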
- * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-simulate-template | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-indices-simulate-template | Elasticsearch API documentation} */ async simulateTemplate (this: That, params?: T.IndicesSimulateTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise async simulateTemplate (this: That, params?: T.IndicesSimulateTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise> async simulateTemplate (this: That, params?: T.IndicesSimulateTemplateRequest, options?: TransportRequestOptions): Promise async simulateTemplate (this: That, params?: T.IndicesSimulateTemplateRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] - const acceptedBody: string[] = ['allow_auto_create', 'index_patterns', 'composed_of', 'template', 'data_stream', 'priority', 'version', '_meta', 'ignore_missing_component_templates', 'deprecated'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['indices.simulate_template'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2732,8 +3834,14 @@ export default class Indices { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -2757,14 +3865,18 @@ export default class Indices { /** * Split an index. Split an index into a new index with more primary shards. * Before you can split an index: * The index must be read-only. * The cluster health status must be green. You can do make an index read-only with the following request using the add index block API: ``` PUT /my_source_index/_block/write ``` The current write index on a data stream cannot be split. In order to split the current write index, the data stream must first be rolled over so that a new write index is created and then the previous write index can be split. The number of times the index can be split (and the number of shards that each original shard can be split into) is determined by the `index.number_of_routing_shards` setting. The number of routing shards specifies the hashing space that is used internally to distribute documents across shards with consistent hashing. For instance, a 5 shard index with `number_of_routing_shards` set to 30 (5 x 2 x 3) could be split by a factor of 2 or 3. A split operation: * Creates a new target index with the same definition as the source index, but with a larger number of primary shards. * Hard-links segments from the source index into the target index. If the file system doesn't support hard-linking, all segments are copied into the new index, which is a much more time consuming process. * Hashes all documents again, after low level files are created, to delete documents that belong to a different shard. * Recovers the target index as though it were a closed index which had just been re-opened. IMPORTANT: Indices can only be split if they satisfy the following requirements: * The target index must not exist. * The source index must have fewer primary shards than the target index. 
* The number of primary shards in the target index must be a multiple of the number of primary shards in the source index. * The node handling the split process must have sufficient free disk space to accommodate a second copy of the existing index. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-split | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-indices-split | Elasticsearch API documentation} */ async split (this: That, params: T.IndicesSplitRequest, options?: TransportRequestOptionsWithOutMeta): Promise async split (this: That, params: T.IndicesSplitRequest, options?: TransportRequestOptionsWithMeta): Promise> async split (this: That, params: T.IndicesSplitRequest, options?: TransportRequestOptions): Promise async split (this: That, params: T.IndicesSplitRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index', 'target'] - const acceptedBody: string[] = ['aliases', 'settings'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['indices.split'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2786,8 +3898,14 @@ export default class Indices { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -2805,13 +3923,16 @@ export default class Indices { /** * Get index statistics. For data streams, the API retrieves statistics for the stream's backing indices. By default, the returned statistics are index-level with `primaries` and `total` aggregations. `primaries` are the values for only the primary shards. `total` are the accumulated values for both primary and replica shards. To get shard-level statistics, set the `level` parameter to `shards`. NOTE: When moving to another node, the shard-level statistics for a shard are cleared. Although the shard is no longer part of the node, that node retains any node-level statistics to which the shard contributed. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-stats | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-indices-stats | Elasticsearch API documentation} */ async stats (this: That, params?: T.IndicesStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async stats (this: That, params?: T.IndicesStatsRequest, options?: TransportRequestOptionsWithMeta): Promise> async stats (this: That, params?: T.IndicesStatsRequest, options?: TransportRequestOptions): Promise async stats (this: That, params?: T.IndicesStatsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['metric', 'index'] + const { + path: acceptedPath + } = this.acceptedParams['indices.stats'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2862,14 +3983,18 @@ export default class Indices { /** * Create or update an alias. Adds a data stream or index to an alias. 
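The split workflow described above can be sketched with the client as follows; the index names and shard count are placeholders, and `addBlock` performs the `PUT /my_source_index/_block/write` step from the description.

```
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

async function splitExample (): Promise<void> {
  // Equivalent of `PUT /my_source_index/_block/write` from the description above.
  await client.indices.addBlock({ index: 'my_source_index', block: 'write' })

  // `index` and `target` are path parameters; `settings` and `aliases` go in the body.
  await client.indices.split({
    index: 'my_source_index',
    target: 'my_split_index',
    // Must be a multiple of the source index's primary shard count.
    settings: { 'index.number_of_shards': 10 }
  })
}

splitExample().catch(console.error)
```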
- * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-aliases.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-indices-update-aliases | Elasticsearch API documentation} */ async updateAliases (this: That, params?: T.IndicesUpdateAliasesRequest, options?: TransportRequestOptionsWithOutMeta): Promise async updateAliases (this: That, params?: T.IndicesUpdateAliasesRequest, options?: TransportRequestOptionsWithMeta): Promise> async updateAliases (this: That, params?: T.IndicesUpdateAliasesRequest, options?: TransportRequestOptions): Promise async updateAliases (this: That, params?: T.IndicesUpdateAliasesRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const acceptedBody: string[] = ['actions'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['indices.update_aliases'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2892,8 +4017,14 @@ export default class Indices { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -2907,14 +4038,18 @@ export default class Indices { /** * Validate a query. Validates a query without running it. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/search-validate.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-indices-validate-query | Elasticsearch API documentation} */ async validateQuery (this: That, params?: T.IndicesValidateQueryRequest, options?: TransportRequestOptionsWithOutMeta): Promise async validateQuery (this: That, params?: T.IndicesValidateQueryRequest, options?: TransportRequestOptionsWithMeta): Promise> async validateQuery (this: That, params?: T.IndicesValidateQueryRequest, options?: TransportRequestOptions): Promise async validateQuery (this: That, params?: T.IndicesValidateQueryRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] - const acceptedBody: string[] = ['query'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['indices.validate_query'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2937,8 +4072,14 @@ export default class Indices { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } diff --git a/src/api/api/inference.ts b/src/api/api/inference.ts index b7c9fb55a..2946431f7 100644 --- a/src/api/api/inference.ts +++ b/src/api/api/inference.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. 
Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 */ /* eslint-disable import/export */ @@ -35,23 +21,464 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -interface That { transport: Transport } + +interface That { + transport: Transport + acceptedParams: Record +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] export default class Inference { transport: Transport + acceptedParams: Record constructor (transport: Transport) { this.transport = transport + this.acceptedParams = { + 'inference.chat_completion_unified': { + path: [ + 'inference_id' + ], + body: [ + 'chat_completion_request' + ], + query: [ + 'timeout' + ] + }, + 'inference.completion': { + path: [ + 'inference_id' + ], + body: [ + 'input', + 'task_settings' + ], + query: [ + 'timeout' + ] + }, + 'inference.delete': { + path: [ + 'task_type', + 'inference_id' + ], + body: [], + query: [ + 'dry_run', + 'force' + ] + }, + 'inference.get': { + path: [ + 'task_type', + 'inference_id' + ], + body: [], + query: [] + }, + 'inference.inference': { + path: [ + 'task_type', + 'inference_id' + ], + body: [ + 'query', + 'input', + 'task_settings' + ], + query: [ + 'timeout' + ] + }, + 'inference.put': { + path: [ + 'task_type', + 'inference_id' + ], + body: [ + 'inference_config' + ], + query: [] + }, + 'inference.put_alibabacloud': { + path: [ + 'task_type', + 'alibabacloud_inference_id' + ], + body: [ + 'chunking_settings', + 'service', + 'service_settings', + 'task_settings' + ], + query: [] + }, + 'inference.put_amazonbedrock': { + path: [ + 'task_type', + 'amazonbedrock_inference_id' + ], + body: [ + 'chunking_settings', + 'service', + 'service_settings', + 'task_settings' + ], + query: [] + }, + 'inference.put_anthropic': { + path: [ + 'task_type', + 'anthropic_inference_id' + ], + body: [ + 'chunking_settings', + 'service', + 'service_settings', + 'task_settings' + ], + query: [] + }, + 'inference.put_azureaistudio': { + path: [ + 'task_type', + 'azureaistudio_inference_id' + ], + body: [ + 'chunking_settings', + 'service', + 'service_settings', + 'task_settings' + ], + query: [] + }, + 'inference.put_azureopenai': { + path: [ + 'task_type', + 'azureopenai_inference_id' + ], + body: [ + 'chunking_settings', + 'service', + 'service_settings', + 'task_settings' + ], + query: [] + }, + 'inference.put_cohere': { + path: [ + 'task_type', + 'cohere_inference_id' + ], + body: [ + 'chunking_settings', + 'service', + 'service_settings', + 'task_settings' + ], + query: [] + }, + 'inference.put_elasticsearch': { + path: [ + 'task_type', + 'elasticsearch_inference_id' + ], + body: [ + 'chunking_settings', + 'service', + 'service_settings', + 'task_settings' + ], + query: [] + }, + 'inference.put_elser': { + path: [ + 'task_type', + 'elser_inference_id' + ], + body: [ + 'chunking_settings', + 'service', + 'service_settings' + ], + query: [] + }, + 
'inference.put_googleaistudio': { + path: [ + 'task_type', + 'googleaistudio_inference_id' + ], + body: [ + 'chunking_settings', + 'service', + 'service_settings' + ], + query: [] + }, + 'inference.put_googlevertexai': { + path: [ + 'task_type', + 'googlevertexai_inference_id' + ], + body: [ + 'chunking_settings', + 'service', + 'service_settings', + 'task_settings' + ], + query: [] + }, + 'inference.put_hugging_face': { + path: [ + 'task_type', + 'huggingface_inference_id' + ], + body: [ + 'chunking_settings', + 'service', + 'service_settings' + ], + query: [] + }, + 'inference.put_jinaai': { + path: [ + 'task_type', + 'jinaai_inference_id' + ], + body: [ + 'chunking_settings', + 'service', + 'service_settings', + 'task_settings' + ], + query: [] + }, + 'inference.put_mistral': { + path: [ + 'task_type', + 'mistral_inference_id' + ], + body: [ + 'chunking_settings', + 'service', + 'service_settings' + ], + query: [] + }, + 'inference.put_openai': { + path: [ + 'task_type', + 'openai_inference_id' + ], + body: [ + 'chunking_settings', + 'service', + 'service_settings', + 'task_settings' + ], + query: [] + }, + 'inference.put_voyageai': { + path: [ + 'task_type', + 'voyageai_inference_id' + ], + body: [ + 'chunking_settings', + 'service', + 'service_settings', + 'task_settings' + ], + query: [] + }, + 'inference.put_watsonx': { + path: [ + 'task_type', + 'watsonx_inference_id' + ], + body: [ + 'service', + 'service_settings' + ], + query: [] + }, + 'inference.rerank': { + path: [ + 'inference_id' + ], + body: [ + 'query', + 'input', + 'task_settings' + ], + query: [ + 'timeout' + ] + }, + 'inference.sparse_embedding': { + path: [ + 'inference_id' + ], + body: [ + 'input', + 'task_settings' + ], + query: [ + 'timeout' + ] + }, + 'inference.stream_completion': { + path: [ + 'inference_id' + ], + body: [ + 'input', + 'task_settings' + ], + query: [] + }, + 'inference.text_embedding': { + path: [ + 'inference_id' + ], + body: [ + 'input', + 'task_settings' + ], + query: [ + 'timeout' + ] + }, + 'inference.update': { + path: [ + 'inference_id', + 'task_type' + ], + body: [ + 'inference_config' + ], + query: [] + } + } + } + + /** + * Perform chat completion inference + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-inference-unified-inference | Elasticsearch API documentation} + */ + async chatCompletionUnified (this: That, params: T.InferenceChatCompletionUnifiedRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async chatCompletionUnified (this: That, params: T.InferenceChatCompletionUnifiedRequest, options?: TransportRequestOptionsWithMeta): Promise> + async chatCompletionUnified (this: That, params: T.InferenceChatCompletionUnifiedRequest, options?: TransportRequestOptions): Promise + async chatCompletionUnified (this: That, params: T.InferenceChatCompletionUnifiedRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['inference.chat_completion_unified'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: any = params.body ?? 
undefined + for (const key in params) { + if (acceptedBody.includes(key)) { + // @ts-expect-error + body = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + } + + const method = 'POST' + const path = `/_inference/chat_completion/${encodeURIComponent(params.inference_id.toString())}/_stream` + const meta: TransportRequestMetadata = { + name: 'inference.chat_completion_unified', + pathParts: { + inference_id: params.inference_id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Perform completion inference on the service + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-inference-inference | Elasticsearch API documentation} + */ + async completion (this: That, params: T.InferenceCompletionRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async completion (this: That, params: T.InferenceCompletionRequest, options?: TransportRequestOptionsWithMeta): Promise> + async completion (this: That, params: T.InferenceCompletionRequest, options?: TransportRequestOptions): Promise + async completion (this: That, params: T.InferenceCompletionRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['inference.completion'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? 
{} + // @ts-expect-error + body[key] = params[key] + } + } + } + + const method = 'POST' + const path = `/_inference/completion/${encodeURIComponent(params.inference_id.toString())}` + const meta: TransportRequestMetadata = { + name: 'inference.completion', + pathParts: { + inference_id: params.inference_id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) } /** * Delete an inference endpoint - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-delete | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-inference-delete | Elasticsearch API documentation} */ async delete (this: That, params: T.InferenceDeleteRequest, options?: TransportRequestOptionsWithOutMeta): Promise async delete (this: That, params: T.InferenceDeleteRequest, options?: TransportRequestOptionsWithMeta): Promise> async delete (this: That, params: T.InferenceDeleteRequest, options?: TransportRequestOptions): Promise async delete (this: That, params: T.InferenceDeleteRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['task_type', 'inference_id'] + const { + path: acceptedPath + } = this.acceptedParams['inference.delete'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -95,13 +522,16 @@ export default class Inference { /** * Get an inference endpoint - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-get | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-inference-get | Elasticsearch API documentation} */ async get (this: That, params?: T.InferenceGetRequest, options?: TransportRequestOptionsWithOutMeta): Promise async get (this: That, params?: T.InferenceGetRequest, options?: TransportRequestOptionsWithMeta): Promise> async get (this: That, params?: T.InferenceGetRequest, options?: TransportRequestOptions): Promise async get (this: That, params?: T.InferenceGetRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['task_type', 'inference_id'] + const { + path: acceptedPath + } = this.acceptedParams['inference.get'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -148,15 +578,19 @@ export default class Inference { } /** - * Perform inference on the service. This API enables you to use machine learning models to perform specific tasks on data that you provide as an input. It returns a response with the results of the tasks. The inference endpoint you use can perform one specific task that has been defined when the endpoint was created with the create inference API. > info > The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Azure, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face. For built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models. However, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs. 
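A minimal sketch of calling the inference endpoints whose accepted parameters are declared above (`completion`, `get`, `delete`). The endpoint id is a placeholder; note that `input` and `task_settings` are body parameters, while `timeout`, `dry_run`, and `force` are query parameters.

```
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

async function inferenceExample (): Promise<void> {
  // `input` and `task_settings` are body parameters; `timeout` is a query parameter.
  const completion = await client.inference.completion({
    inference_id: 'my-completion-endpoint', // placeholder endpoint id
    input: 'What is Elasticsearch?',
    timeout: '30s'
  })
  console.log(completion)

  // List all inference endpoints (both path parameters are optional).
  const endpoints = await client.inference.get()
  console.log(endpoints)

  // `dry_run` and `force` are query parameters on the delete API.
  await client.inference.delete({ inference_id: 'my-completion-endpoint', dry_run: true })
}

inferenceExample().catch(console.error)
```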
- * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-inference | Elasticsearch API documentation} + * Perform inference on the service. This API enables you to use machine learning models to perform specific tasks on data that you provide as an input. It returns a response with the results of the tasks. The inference endpoint you use can perform one specific task that has been defined when the endpoint was created with the create inference API. For details about using this API with a service, such as Amazon Bedrock, Anthropic, or HuggingFace, refer to the service-specific documentation. > info > The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Azure, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face. For built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models. However, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-inference-inference | Elasticsearch API documentation} */ async inference (this: That, params: T.InferenceInferenceRequest, options?: TransportRequestOptionsWithOutMeta): Promise async inference (this: That, params: T.InferenceInferenceRequest, options?: TransportRequestOptionsWithMeta): Promise> async inference (this: That, params: T.InferenceInferenceRequest, options?: TransportRequestOptions): Promise async inference (this: That, params: T.InferenceInferenceRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['task_type', 'inference_id'] - const acceptedBody: string[] = ['query', 'input', 'task_settings'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['inference.inference'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -178,8 +612,14 @@ export default class Inference { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -204,14 +644,18 @@ export default class Inference { /** * Create an inference endpoint. When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. After creating the endpoint, wait for the model deployment to complete before using it. To verify the deployment status, use the get trained model statistics API. Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources. IMPORTANT: The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Mistral, Azure OpenAI, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face. 
For built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models. However, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-inference-put | Elasticsearch API documentation} */ async put (this: That, params: T.InferencePutRequest, options?: TransportRequestOptionsWithOutMeta): Promise async put (this: That, params: T.InferencePutRequest, options?: TransportRequestOptionsWithMeta): Promise> async put (this: That, params: T.InferencePutRequest, options?: TransportRequestOptions): Promise async put (this: That, params: T.InferencePutRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['task_type', 'inference_id'] - const acceptedBody: string[] = ['inference_config'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['inference.put'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -223,8 +667,14 @@ export default class Inference { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -248,15 +698,19 @@ export default class Inference { } /** - * Perform streaming inference. Get real-time responses for completion tasks by delivering answers incrementally, reducing response times during computation. This API works only with the completion task type. IMPORTANT: The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Azure, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face. For built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models. However, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs. This API requires the `monitor_inference` cluster privilege (the built-in `inference_admin` and `inference_user` roles grant this privilege). You must use a client that supports streaming. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-stream-inference | Elasticsearch API documentation} + * Create an AlibabaCloud AI Search inference endpoint. Create an inference endpoint to perform an inference task with the `alibabacloud-ai-search` service. When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. After creating the endpoint, wait for the model deployment to complete before using it. To verify the deployment status, use the get trained model statistics API. Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. 
Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-inference-put-alibabacloud | Elasticsearch API documentation} */ - async streamInference (this: That, params: T.InferenceStreamInferenceRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async streamInference (this: That, params: T.InferenceStreamInferenceRequest, options?: TransportRequestOptionsWithMeta): Promise> - async streamInference (this: That, params: T.InferenceStreamInferenceRequest, options?: TransportRequestOptions): Promise - async streamInference (this: That, params: T.InferenceStreamInferenceRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['inference_id', 'task_type'] - const acceptedBody: string[] = ['input'] + async putAlibabacloud (this: That, params: T.InferencePutAlibabacloudRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async putAlibabacloud (this: That, params: T.InferencePutAlibabacloudRequest, options?: TransportRequestOptionsWithMeta): Promise> + async putAlibabacloud (this: That, params: T.InferencePutAlibabacloudRequest, options?: TransportRequestOptions): Promise + async putAlibabacloud (this: That, params: T.InferencePutAlibabacloudRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['inference.put_alibabacloud'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -278,40 +732,101 @@ export default class Inference { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } - let method = '' - let path = '' - if (params.task_type != null && params.inference_id != null) { - method = 'POST' - path = `/_inference/${encodeURIComponent(params.task_type.toString())}/${encodeURIComponent(params.inference_id.toString())}/_stream` - } else { - method = 'POST' - path = `/_inference/${encodeURIComponent(params.inference_id.toString())}/_stream` + const method = 'PUT' + const path = `/_inference/${encodeURIComponent(params.task_type.toString())}/${encodeURIComponent(params.alibabacloud_inference_id.toString())}` + const meta: TransportRequestMetadata = { + name: 'inference.put_alibabacloud', + pathParts: { + task_type: params.task_type, + alibabacloud_inference_id: params.alibabacloud_inference_id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Create an Amazon Bedrock inference endpoint. Creates an inference endpoint to perform an inference task with the `amazonbedrock` service. >info > You need to provide the access and secret keys only once, during the inference model creation. The get inference API does not retrieve your access or secret keys. After creating the inference model, you cannot change the associated key pairs. If you want to use a different access and secret key pair, delete the inference model and recreate it with the same name and the updated keys. 
When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. After creating the endpoint, wait for the model deployment to complete before using it. To verify the deployment status, use the get trained model statistics API. Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-inference-put-amazonbedrock | Elasticsearch API documentation} + */ + async putAmazonbedrock (this: That, params: T.InferencePutAmazonbedrockRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async putAmazonbedrock (this: That, params: T.InferencePutAmazonbedrockRequest, options?: TransportRequestOptionsWithMeta): Promise> + async putAmazonbedrock (this: That, params: T.InferencePutAmazonbedrockRequest, options?: TransportRequestOptions): Promise + async putAmazonbedrock (this: That, params: T.InferencePutAmazonbedrockRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['inference.put_amazonbedrock'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } } + + const method = 'PUT' + const path = `/_inference/${encodeURIComponent(params.task_type.toString())}/${encodeURIComponent(params.amazonbedrock_inference_id.toString())}` const meta: TransportRequestMetadata = { - name: 'inference.stream_inference', + name: 'inference.put_amazonbedrock', pathParts: { - inference_id: params.inference_id, - task_type: params.task_type + task_type: params.task_type, + amazonbedrock_inference_id: params.amazonbedrock_inference_id } } return await this.transport.request({ path, method, querystring, body, meta }, options) } /** - * Perform inference on the service using the Unified Schema - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/unified-inference-api.html | Elasticsearch API documentation} + * Create an Anthropic inference endpoint. Create an inference endpoint to perform an inference task with the `anthropic` service. When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. After creating the endpoint, wait for the model deployment to complete before using it. To verify the deployment status, use the get trained model statistics API. Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. 
Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-inference-put-anthropic | Elasticsearch API documentation} */ - async unifiedInference (this: That, params: T.InferenceUnifiedInferenceRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async unifiedInference (this: That, params: T.InferenceUnifiedInferenceRequest, options?: TransportRequestOptionsWithMeta): Promise> - async unifiedInference (this: That, params: T.InferenceUnifiedInferenceRequest, options?: TransportRequestOptions): Promise - async unifiedInference (this: That, params: T.InferenceUnifiedInferenceRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['task_type', 'inference_id'] - const acceptedBody: string[] = ['messages', 'model', 'max_completion_tokens', 'stop', 'temperature', 'tool_choice', 'tools', 'top_p'] + async putAnthropic (this: That, params: T.InferencePutAnthropicRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async putAnthropic (this: That, params: T.InferencePutAnthropicRequest, options?: TransportRequestOptionsWithMeta): Promise> + async putAnthropic (this: That, params: T.InferencePutAnthropicRequest, options?: TransportRequestOptions): Promise + async putAnthropic (this: That, params: T.InferencePutAnthropicRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['inference.put_anthropic'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -333,63 +848,1054 @@ export default class Inference { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + } + + const method = 'PUT' + const path = `/_inference/${encodeURIComponent(params.task_type.toString())}/${encodeURIComponent(params.anthropic_inference_id.toString())}` + const meta: TransportRequestMetadata = { + name: 'inference.put_anthropic', + pathParts: { + task_type: params.task_type, + anthropic_inference_id: params.anthropic_inference_id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Create an Azure AI studio inference endpoint. Create an inference endpoint to perform an inference task with the `azureaistudio` service. When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. After creating the endpoint, wait for the model deployment to complete before using it. To verify the deployment status, use the get trained model statistics API. Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources. 
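A hedged sketch of creating an Anthropic completion endpoint with `putAnthropic`. The path and body parameter names come from the accepted parameters above; the `service_settings` and `task_settings` field names (`api_key`, `model_id`, `max_tokens`), the model name, and the endpoint id are assumptions based on the Elasticsearch Anthropic service documentation.

```
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

async function createAnthropicEndpoint (): Promise<void> {
  // `task_type` and `anthropic_inference_id` are path parameters;
  // `service`, `service_settings`, and `task_settings` are routed to the body.
  await client.inference.putAnthropic({
    task_type: 'completion',
    anthropic_inference_id: 'my-anthropic-endpoint', // placeholder id
    service: 'anthropic',
    service_settings: {
      api_key: 'ANTHROPIC_API_KEY',       // assumed field name; placeholder value
      model_id: 'claude-3-5-haiku-latest' // assumed field name and model
    },
    task_settings: {
      max_tokens: 1024 // assumed field name
    }
  })
}

createAnthropicEndpoint().catch(console.error)
```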
+ * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-inference-put-azureaistudio | Elasticsearch API documentation} + */ + async putAzureaistudio (this: That, params: T.InferencePutAzureaistudioRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async putAzureaistudio (this: That, params: T.InferencePutAzureaistudioRequest, options?: TransportRequestOptionsWithMeta): Promise> + async putAzureaistudio (this: That, params: T.InferencePutAzureaistudioRequest, options?: TransportRequestOptions): Promise + async putAzureaistudio (this: That, params: T.InferencePutAzureaistudioRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['inference.put_azureaistudio'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} // @ts-expect-error - querystring[key] = params[key] + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } - let method = '' - let path = '' - if (params.task_type != null && params.inference_id != null) { - method = 'POST' - path = `/_inference/${encodeURIComponent(params.task_type.toString())}/${encodeURIComponent(params.inference_id.toString())}/_unified` - } else { - method = 'POST' - path = `/_inference/${encodeURIComponent(params.inference_id.toString())}/_unified` + const method = 'PUT' + const path = `/_inference/${encodeURIComponent(params.task_type.toString())}/${encodeURIComponent(params.azureaistudio_inference_id.toString())}` + const meta: TransportRequestMetadata = { + name: 'inference.put_azureaistudio', + pathParts: { + task_type: params.task_type, + azureaistudio_inference_id: params.azureaistudio_inference_id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Create an Azure OpenAI inference endpoint. Create an inference endpoint to perform an inference task with the `azureopenai` service. The list of chat completion models that you can choose from in your Azure OpenAI deployment include: * [GPT-4 and GPT-4 Turbo models](https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models?tabs=global-standard%2Cstandard-chat-completions#gpt-4-and-gpt-4-turbo-models) * [GPT-3.5](https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models?tabs=global-standard%2Cstandard-chat-completions#gpt-35) The list of embeddings models that you can choose from in your deployment can be found in the [Azure models documentation](https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models?tabs=global-standard%2Cstandard-chat-completions#embeddings). When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. After creating the endpoint, wait for the model deployment to complete before using it. 
To verify the deployment status, use the get trained model statistics API. Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-inference-put-azureopenai | Elasticsearch API documentation} + */ + async putAzureopenai (this: That, params: T.InferencePutAzureopenaiRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async putAzureopenai (this: That, params: T.InferencePutAzureopenaiRequest, options?: TransportRequestOptionsWithMeta): Promise> + async putAzureopenai (this: That, params: T.InferencePutAzureopenaiRequest, options?: TransportRequestOptions): Promise + async putAzureopenai (this: That, params: T.InferencePutAzureopenaiRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['inference.put_azureopenai'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } } + + const method = 'PUT' + const path = `/_inference/${encodeURIComponent(params.task_type.toString())}/${encodeURIComponent(params.azureopenai_inference_id.toString())}` const meta: TransportRequestMetadata = { - name: 'inference.unified_inference', + name: 'inference.put_azureopenai', pathParts: { task_type: params.task_type, - inference_id: params.inference_id + azureopenai_inference_id: params.azureopenai_inference_id } } return await this.transport.request({ path, method, querystring, body, meta }, options) } /** - * Update an inference endpoint. Modify `task_settings`, secrets (within `service_settings`), or `num_allocations` for an inference endpoint, depending on the specific endpoint service and `task_type`. IMPORTANT: The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Azure, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face. For built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models. However, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-update | Elasticsearch API documentation} + * Create a Cohere inference endpoint. Create an inference endpoint to perform an inference task with the `cohere` service. 
When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. After creating the endpoint, wait for the model deployment to complete before using it. To verify the deployment status, use the get trained model statistics API. Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-inference-put-cohere | Elasticsearch API documentation} */ - async update (this: That, params: T.InferenceUpdateRequest, options?: TransportRequestOptionsWithOutMeta): Promise - async update (this: That, params: T.InferenceUpdateRequest, options?: TransportRequestOptionsWithMeta): Promise> - async update (this: That, params: T.InferenceUpdateRequest, options?: TransportRequestOptions): Promise - async update (this: That, params: T.InferenceUpdateRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['inference_id', 'task_type'] - const acceptedBody: string[] = ['inference_config'] + async putCohere (this: That, params: T.InferencePutCohereRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async putCohere (this: That, params: T.InferencePutCohereRequest, options?: TransportRequestOptionsWithMeta): Promise> + async putCohere (this: That, params: T.InferencePutCohereRequest, options?: TransportRequestOptions): Promise + async putCohere (this: That, params: T.InferencePutCohereRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['inference.put_cohere'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} - let body: any = params.body ?? undefined + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + for (const key in params) { if (acceptedBody.includes(key)) { + body = body ?? {} // @ts-expect-error - body = params[key] + body[key] = params[key] } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + } + + const method = 'PUT' + const path = `/_inference/${encodeURIComponent(params.task_type.toString())}/${encodeURIComponent(params.cohere_inference_id.toString())}` + const meta: TransportRequestMetadata = { + name: 'inference.put_cohere', + pathParts: { + task_type: params.task_type, + cohere_inference_id: params.cohere_inference_id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Create an Elasticsearch inference endpoint. Create an inference endpoint to perform an inference task with the `elasticsearch` service. > info > Your Elasticsearch deployment contains preconfigured ELSER and E5 inference endpoints, you only need to create the enpoints using the API if you want to customize the settings. 
If you use the ELSER or the E5 model through the `elasticsearch` service, the API request will automatically download and deploy the model if it isn't downloaded yet. > info > You might see a 502 bad gateway error in the response when using the Kibana Console. This error usually just reflects a timeout, while the model downloads in the background. You can check the download progress in the Machine Learning UI. If using the Python client, you can set the timeout parameter to a higher value. After creating the endpoint, wait for the model deployment to complete before using it. To verify the deployment status, use the get trained model statistics API. Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-inference-put-elasticsearch | Elasticsearch API documentation} + */ + async putElasticsearch (this: That, params: T.InferencePutElasticsearchRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async putElasticsearch (this: That, params: T.InferencePutElasticsearchRequest, options?: TransportRequestOptionsWithMeta): Promise> + async putElasticsearch (this: That, params: T.InferencePutElasticsearchRequest, options?: TransportRequestOptions): Promise + async putElasticsearch (this: That, params: T.InferencePutElasticsearchRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['inference.put_elasticsearch'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} // @ts-expect-error - querystring[key] = params[key] + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } - let method = '' - let path = '' - if (params.task_type != null && params.inference_id != null) { - method = 'POST' + const method = 'PUT' + const path = `/_inference/${encodeURIComponent(params.task_type.toString())}/${encodeURIComponent(params.elasticsearch_inference_id.toString())}` + const meta: TransportRequestMetadata = { + name: 'inference.put_elasticsearch', + pathParts: { + task_type: params.task_type, + elasticsearch_inference_id: params.elasticsearch_inference_id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Create an ELSER inference endpoint. Create an inference endpoint to perform an inference task with the `elser` service. You can also deploy ELSER by using the Elasticsearch inference integration. > info > Your Elasticsearch deployment contains a preconfigured ELSER inference endpoint, you only need to create the enpoint using the API if you want to customize the settings. 
The API request will automatically download and deploy the ELSER model if it isn't already downloaded. > info > You might see a 502 bad gateway error in the response when using the Kibana Console. This error usually just reflects a timeout, while the model downloads in the background. You can check the download progress in the Machine Learning UI. If using the Python client, you can set the timeout parameter to a higher value. After creating the endpoint, wait for the model deployment to complete before using it. To verify the deployment status, use the get trained model statistics API. Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-inference-put-elser | Elasticsearch API documentation} + */ + async putElser (this: That, params: T.InferencePutElserRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async putElser (this: That, params: T.InferencePutElserRequest, options?: TransportRequestOptionsWithMeta): Promise> + async putElser (this: That, params: T.InferencePutElserRequest, options?: TransportRequestOptions): Promise + async putElser (this: That, params: T.InferencePutElserRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['inference.put_elser'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + } + + const method = 'PUT' + const path = `/_inference/${encodeURIComponent(params.task_type.toString())}/${encodeURIComponent(params.elser_inference_id.toString())}` + const meta: TransportRequestMetadata = { + name: 'inference.put_elser', + pathParts: { + task_type: params.task_type, + elser_inference_id: params.elser_inference_id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Create an Google AI Studio inference endpoint. Create an inference endpoint to perform an inference task with the `googleaistudio` service. When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. After creating the endpoint, wait for the model deployment to complete before using it. To verify the deployment status, use the get trained model statistics API. Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources. 
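A minimal sketch of creating and then querying an ELSER endpoint with `putElser` and `sparseEmbedding`. The endpoint id is a placeholder, and the `num_allocations`/`num_threads` field names are assumed from the ELSER service settings.

```
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

async function elserExample (): Promise<void> {
  await client.inference.putElser({
    task_type: 'sparse_embedding',
    elser_inference_id: 'my-elser-endpoint', // placeholder id
    service: 'elser',
    service_settings: {
      num_allocations: 1, // assumed field names, per the ELSER service settings
      num_threads: 1
    }
  })

  // As noted above, wait for the model deployment to complete before querying it.
  const result = await client.inference.sparseEmbedding({
    inference_id: 'my-elser-endpoint',
    input: 'The quick brown fox jumps over the lazy dog'
  })
  console.log(result)
}

elserExample().catch(console.error)
```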
+ * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-inference-put-googleaistudio | Elasticsearch API documentation} + */ + async putGoogleaistudio (this: That, params: T.InferencePutGoogleaistudioRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async putGoogleaistudio (this: That, params: T.InferencePutGoogleaistudioRequest, options?: TransportRequestOptionsWithMeta): Promise> + async putGoogleaistudio (this: That, params: T.InferencePutGoogleaistudioRequest, options?: TransportRequestOptions): Promise + async putGoogleaistudio (this: That, params: T.InferencePutGoogleaistudioRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['inference.put_googleaistudio'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + } + + const method = 'PUT' + const path = `/_inference/${encodeURIComponent(params.task_type.toString())}/${encodeURIComponent(params.googleaistudio_inference_id.toString())}` + const meta: TransportRequestMetadata = { + name: 'inference.put_googleaistudio', + pathParts: { + task_type: params.task_type, + googleaistudio_inference_id: params.googleaistudio_inference_id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Create a Google Vertex AI inference endpoint. Create an inference endpoint to perform an inference task with the `googlevertexai` service. When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. After creating the endpoint, wait for the model deployment to complete before using it. To verify the deployment status, use the get trained model statistics API. Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources. 
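A hedged sketch of `putGoogleaistudio`, whose path and body parameter names are declared above; the endpoint id, model name, and the `api_key`/`model_id` field names are assumptions.

```
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

async function createGoogleAiStudioEndpoint (): Promise<void> {
  await client.inference.putGoogleaistudio({
    task_type: 'text_embedding',
    googleaistudio_inference_id: 'my-googleaistudio-endpoint', // placeholder id
    service: 'googleaistudio',
    service_settings: {
      api_key: 'GOOGLE_AI_STUDIO_API_KEY', // assumed field name; placeholder value
      model_id: 'text-embedding-004'       // assumed field name and model
    }
  })
}

createGoogleAiStudioEndpoint().catch(console.error)
```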
+ * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-inference-put-googlevertexai | Elasticsearch API documentation} + */ + async putGooglevertexai (this: That, params: T.InferencePutGooglevertexaiRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async putGooglevertexai (this: That, params: T.InferencePutGooglevertexaiRequest, options?: TransportRequestOptionsWithMeta): Promise> + async putGooglevertexai (this: That, params: T.InferencePutGooglevertexaiRequest, options?: TransportRequestOptions): Promise + async putGooglevertexai (this: That, params: T.InferencePutGooglevertexaiRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['inference.put_googlevertexai'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + } + + const method = 'PUT' + const path = `/_inference/${encodeURIComponent(params.task_type.toString())}/${encodeURIComponent(params.googlevertexai_inference_id.toString())}` + const meta: TransportRequestMetadata = { + name: 'inference.put_googlevertexai', + pathParts: { + task_type: params.task_type, + googlevertexai_inference_id: params.googlevertexai_inference_id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Create a Hugging Face inference endpoint. Create an inference endpoint to perform an inference task with the `hugging_face` service. You must first create an inference endpoint on the Hugging Face endpoint page to get an endpoint URL. Select the model you want to use on the new endpoint creation page (for example `intfloat/e5-small-v2`), then select the sentence embeddings task under the advanced configuration section. Create the endpoint and copy the URL after the endpoint initialization has been finished. The following models are recommended for the Hugging Face service: * `all-MiniLM-L6-v2` * `all-MiniLM-L12-v2` * `all-mpnet-base-v2` * `e5-base-v2` * `e5-small-v2` * `multilingual-e5-base` * `multilingual-e5-small` When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. After creating the endpoint, wait for the model deployment to complete before using it. To verify the deployment status, use the get trained model statistics API. Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources. 
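A hedged sketch of `putHuggingFace` following the description above (the endpoint URL is copied from the Hugging Face endpoint page); the `url`/`api_key` field names and all values are assumptions or placeholders. A follow-up `textEmbedding` call shows the endpoint in use.

```
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

async function huggingFaceExample (): Promise<void> {
  await client.inference.putHuggingFace({
    task_type: 'text_embedding',
    huggingface_inference_id: 'my-hugging-face-endpoint', // placeholder id
    service: 'hugging_face',
    service_settings: {
      api_key: 'HUGGING_FACE_ACCESS_TOKEN',                  // assumed field name; placeholder value
      url: 'https://xxx.us-east-1.aws.endpoints.huggingface.cloud/' // assumed field name; placeholder endpoint URL
    }
  })

  const embedding = await client.inference.textEmbedding({
    inference_id: 'my-hugging-face-endpoint',
    input: 'Elasticsearch is a distributed search and analytics engine'
  })
  console.log(embedding)
}

huggingFaceExample().catch(console.error)
```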
+ * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-inference-put-hugging-face | Elasticsearch API documentation} + */ + async putHuggingFace (this: That, params: T.InferencePutHuggingFaceRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async putHuggingFace (this: That, params: T.InferencePutHuggingFaceRequest, options?: TransportRequestOptionsWithMeta): Promise> + async putHuggingFace (this: That, params: T.InferencePutHuggingFaceRequest, options?: TransportRequestOptions): Promise + async putHuggingFace (this: That, params: T.InferencePutHuggingFaceRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['inference.put_hugging_face'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + } + + const method = 'PUT' + const path = `/_inference/${encodeURIComponent(params.task_type.toString())}/${encodeURIComponent(params.huggingface_inference_id.toString())}` + const meta: TransportRequestMetadata = { + name: 'inference.put_hugging_face', + pathParts: { + task_type: params.task_type, + huggingface_inference_id: params.huggingface_inference_id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Create an JinaAI inference endpoint. Create an inference endpoint to perform an inference task with the `jinaai` service. To review the available `rerank` models, refer to . To review the available `text_embedding` models, refer to the . When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. After creating the endpoint, wait for the model deployment to complete before using it. To verify the deployment status, use the get trained model statistics API. Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources. 
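Every generated method in this file repeats the same routing loop: keys declared as `body` parameters go into the request body, keys declared as `path` parts are skipped (they only build the URL), and any other key goes to the querystring when it is a declared or common query parameter, otherwise it falls through to the body. A standalone sketch of that logic (the string-body and `body`/`querystring` passthrough handling is omitted for brevity):

```ts
interface AcceptedParams { path: string[], body: string[], query: string[] }

const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty']

// Split a flat params object into body and querystring, mirroring the generated loop.
function splitParams (accepted: AcceptedParams, params: Record<string, unknown>) {
  const body: Record<string, unknown> = {}
  const querystring: Record<string, unknown> = {}
  for (const key in params) {
    if (accepted.body.includes(key)) {
      body[key] = params[key]
    } else if (accepted.path.includes(key)) {
      continue // path parts are interpolated into the URL, never serialized
    } else if (accepted.query.includes(key) || commonQueryParams.includes(key)) {
      querystring[key] = params[key]
    } else {
      body[key] = params[key] // unknown keys default to the body
    }
  }
  return { body, querystring }
}

// task_type and jinaai_inference_id end up in neither body nor querystring.
console.log(splitParams(
  { path: ['task_type', 'jinaai_inference_id'], body: ['service', 'service_settings'], query: ['timeout'] },
  { task_type: 'rerank', jinaai_inference_id: 'jina-rerank', service: 'jinaai', timeout: '30s' }
))
```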
+ * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-inference-put-jinaai | Elasticsearch API documentation} + */ + async putJinaai (this: That, params: T.InferencePutJinaaiRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async putJinaai (this: That, params: T.InferencePutJinaaiRequest, options?: TransportRequestOptionsWithMeta): Promise> + async putJinaai (this: That, params: T.InferencePutJinaaiRequest, options?: TransportRequestOptions): Promise + async putJinaai (this: That, params: T.InferencePutJinaaiRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['inference.put_jinaai'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + } + + const method = 'PUT' + const path = `/_inference/${encodeURIComponent(params.task_type.toString())}/${encodeURIComponent(params.jinaai_inference_id.toString())}` + const meta: TransportRequestMetadata = { + name: 'inference.put_jinaai', + pathParts: { + task_type: params.task_type, + jinaai_inference_id: params.jinaai_inference_id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Create a Mistral inference endpoint. Creates an inference endpoint to perform an inference task with the `mistral` service. When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. After creating the endpoint, wait for the model deployment to complete before using it. To verify the deployment status, use the get trained model statistics API. Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-inference-put-mistral | Elasticsearch API documentation} + */ + async putMistral (this: That, params: T.InferencePutMistralRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async putMistral (this: That, params: T.InferencePutMistralRequest, options?: TransportRequestOptionsWithMeta): Promise> + async putMistral (this: That, params: T.InferencePutMistralRequest, options?: TransportRequestOptions): Promise + async putMistral (this: That, params: T.InferencePutMistralRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['inference.put_mistral'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + } + + const method = 'PUT' + const path = `/_inference/${encodeURIComponent(params.task_type.toString())}/${encodeURIComponent(params.mistral_inference_id.toString())}` + const meta: TransportRequestMetadata = { + name: 'inference.put_mistral', + pathParts: { + task_type: params.task_type, + mistral_inference_id: params.mistral_inference_id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Create an OpenAI inference endpoint. Create an inference endpoint to perform an inference task with the `openai` service. When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. After creating the endpoint, wait for the model deployment to complete before using it. To verify the deployment status, use the get trained model statistics API. Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-inference-put-openai | Elasticsearch API documentation} + */ + async putOpenai (this: That, params: T.InferencePutOpenaiRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async putOpenai (this: That, params: T.InferencePutOpenaiRequest, options?: TransportRequestOptionsWithMeta): Promise> + async putOpenai (this: That, params: T.InferencePutOpenaiRequest, options?: TransportRequestOptions): Promise + async putOpenai (this: That, params: T.InferencePutOpenaiRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['inference.put_openai'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? 
{} + // @ts-expect-error + body[key] = params[key] + } + } + } + + const method = 'PUT' + const path = `/_inference/${encodeURIComponent(params.task_type.toString())}/${encodeURIComponent(params.openai_inference_id.toString())}` + const meta: TransportRequestMetadata = { + name: 'inference.put_openai', + pathParts: { + task_type: params.task_type, + openai_inference_id: params.openai_inference_id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Create a VoyageAI inference endpoint. Create an inference endpoint to perform an inference task with the `voyageai` service. Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-inference-put-voyageai | Elasticsearch API documentation} + */ + async putVoyageai (this: That, params: T.InferencePutVoyageaiRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async putVoyageai (this: That, params: T.InferencePutVoyageaiRequest, options?: TransportRequestOptionsWithMeta): Promise> + async putVoyageai (this: That, params: T.InferencePutVoyageaiRequest, options?: TransportRequestOptions): Promise + async putVoyageai (this: That, params: T.InferencePutVoyageaiRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['inference.put_voyageai'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + } + + const method = 'PUT' + const path = `/_inference/${encodeURIComponent(params.task_type.toString())}/${encodeURIComponent(params.voyageai_inference_id.toString())}` + const meta: TransportRequestMetadata = { + name: 'inference.put_voyageai', + pathParts: { + task_type: params.task_type, + voyageai_inference_id: params.voyageai_inference_id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Create a Watsonx inference endpoint. Create an inference endpoint to perform an inference task with the `watsonxai` service. You need an IBM Cloud Databases for Elasticsearch deployment to use the `watsonxai` inference service. You can provision one through the IBM catalog, the Cloud Databases CLI plug-in, the Cloud Databases API, or Terraform. When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. After creating the endpoint, wait for the model deployment to complete before using it. To verify the deployment status, use the get trained model statistics API. Look for `"state": "fully_allocated"` in the response and ensure that the `"allocation_count"` matches the `"target_allocation_count"`. 
Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-inference-put-watsonx | Elasticsearch API documentation} + */ + async putWatsonx (this: That, params: T.InferencePutWatsonxRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async putWatsonx (this: That, params: T.InferencePutWatsonxRequest, options?: TransportRequestOptionsWithMeta): Promise> + async putWatsonx (this: That, params: T.InferencePutWatsonxRequest, options?: TransportRequestOptions): Promise + async putWatsonx (this: That, params: T.InferencePutWatsonxRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['inference.put_watsonx'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + } + + const method = 'PUT' + const path = `/_inference/${encodeURIComponent(params.task_type.toString())}/${encodeURIComponent(params.watsonx_inference_id.toString())}` + const meta: TransportRequestMetadata = { + name: 'inference.put_watsonx', + pathParts: { + task_type: params.task_type, + watsonx_inference_id: params.watsonx_inference_id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Perform rereanking inference on the service + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-inference-inference | Elasticsearch API documentation} + */ + async rerank (this: That, params: T.InferenceRerankRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async rerank (this: That, params: T.InferenceRerankRequest, options?: TransportRequestOptionsWithMeta): Promise> + async rerank (this: That, params: T.InferenceRerankRequest, options?: TransportRequestOptions): Promise + async rerank (this: That, params: T.InferenceRerankRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['inference.rerank'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? 
{} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + } + + const method = 'POST' + const path = `/_inference/rerank/${encodeURIComponent(params.inference_id.toString())}` + const meta: TransportRequestMetadata = { + name: 'inference.rerank', + pathParts: { + inference_id: params.inference_id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Perform sparse embedding inference on the service + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-inference-inference | Elasticsearch API documentation} + */ + async sparseEmbedding (this: That, params: T.InferenceSparseEmbeddingRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async sparseEmbedding (this: That, params: T.InferenceSparseEmbeddingRequest, options?: TransportRequestOptionsWithMeta): Promise> + async sparseEmbedding (this: That, params: T.InferenceSparseEmbeddingRequest, options?: TransportRequestOptions): Promise + async sparseEmbedding (this: That, params: T.InferenceSparseEmbeddingRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['inference.sparse_embedding'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + } + + const method = 'POST' + const path = `/_inference/sparse_embedding/${encodeURIComponent(params.inference_id.toString())}` + const meta: TransportRequestMetadata = { + name: 'inference.sparse_embedding', + pathParts: { + inference_id: params.inference_id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Perform streaming inference. Get real-time responses for completion tasks by delivering answers incrementally, reducing response times during computation. This API works only with the completion task type. IMPORTANT: The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Azure, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face. For built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models. However, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs. 
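Usage sketches for the task-specific helpers defined above, reusing the `client` instance from the first example. The `query` and `input` body fields are assumptions about the request types and are not shown in this diff.

```ts
// Rerank candidate documents against a query (field names assumed).
const reranked = await client.inference.rerank({
  inference_id: 'my-rerank-endpoint',
  query: 'What is semantic reranking?',
  input: ['Document about reranking', 'Document about ingest pipelines']
})

// Sparse embedding against an ELSER-style endpoint (field name assumed).
const sparse = await client.inference.sparseEmbedding({
  inference_id: 'my-elser-endpoint',
  input: 'Elasticsearch inference APIs'
})
```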
This API requires the `monitor_inference` cluster privilege (the built-in `inference_admin` and `inference_user` roles grant this privilege). You must use a client that supports streaming. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-inference-stream-inference | Elasticsearch API documentation} + */ + async streamCompletion (this: That, params: T.InferenceStreamCompletionRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async streamCompletion (this: That, params: T.InferenceStreamCompletionRequest, options?: TransportRequestOptionsWithMeta): Promise> + async streamCompletion (this: That, params: T.InferenceStreamCompletionRequest, options?: TransportRequestOptions): Promise + async streamCompletion (this: That, params: T.InferenceStreamCompletionRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['inference.stream_completion'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + } + + const method = 'POST' + const path = `/_inference/completion/${encodeURIComponent(params.inference_id.toString())}/_stream` + const meta: TransportRequestMetadata = { + name: 'inference.stream_completion', + pathParts: { + inference_id: params.inference_id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Perform text embedding inference on the service + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-inference-inference | Elasticsearch API documentation} + */ + async textEmbedding (this: That, params: T.InferenceTextEmbeddingRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async textEmbedding (this: That, params: T.InferenceTextEmbeddingRequest, options?: TransportRequestOptionsWithMeta): Promise> + async textEmbedding (this: That, params: T.InferenceTextEmbeddingRequest, options?: TransportRequestOptions): Promise + async textEmbedding (this: That, params: T.InferenceTextEmbeddingRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['inference.text_embedding'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: Record | string | undefined + const userBody = params?.body + if (userBody != null) { + if (typeof userBody === 'string') { + body = userBody + } else { + body = { ...userBody } + } + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? 
{} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + } + + const method = 'POST' + const path = `/_inference/text_embedding/${encodeURIComponent(params.inference_id.toString())}` + const meta: TransportRequestMetadata = { + name: 'inference.text_embedding', + pathParts: { + inference_id: params.inference_id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Update an inference endpoint. Modify `task_settings`, secrets (within `service_settings`), or `num_allocations` for an inference endpoint, depending on the specific endpoint service and `task_type`. IMPORTANT: The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Azure, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face. For built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models. However, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-inference-update | Elasticsearch API documentation} + */ + async update (this: That, params: T.InferenceUpdateRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async update (this: That, params: T.InferenceUpdateRequest, options?: TransportRequestOptionsWithMeta): Promise> + async update (this: That, params: T.InferenceUpdateRequest, options?: TransportRequestOptions): Promise + async update (this: That, params: T.InferenceUpdateRequest, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['inference.update'] + + const userQuery = params?.querystring + const querystring: Record = userQuery != null ? { ...userQuery } : {} + + let body: any = params.body ?? undefined + for (const key in params) { + if (acceptedBody.includes(key)) { + // @ts-expect-error + body = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body' && key !== 'querystring') { + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } + } + } + + let method = '' + let path = '' + if (params.task_type != null && params.inference_id != null) { + method = 'PUT' path = `/_inference/${encodeURIComponent(params.task_type.toString())}/${encodeURIComponent(params.inference_id.toString())}/_update` } else { - method = 'POST' + method = 'PUT' path = `/_inference/${encodeURIComponent(params.inference_id.toString())}/_update` } const meta: TransportRequestMetadata = { diff --git a/src/api/api/info.ts b/src/api/api/info.ts index 1681fe6f3..4f0908193 100644 --- a/src/api/api/info.ts +++ b/src/api/api/info.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. 
See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 */ /* eslint-disable import/export */ @@ -35,17 +21,31 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -interface That { transport: Transport } + +interface That { + transport: Transport +} + +const acceptedParams: Record = { + info: { + path: [], + body: [], + query: [] + } +} /** * Get cluster info. Get basic build, version, and cluster information. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-info | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/group/endpoint-info | Elasticsearch API documentation} */ export default async function InfoApi (this: That, params?: T.InfoRequest, options?: TransportRequestOptionsWithOutMeta): Promise export default async function InfoApi (this: That, params?: T.InfoRequest, options?: TransportRequestOptionsWithMeta): Promise> export default async function InfoApi (this: That, params?: T.InfoRequest, options?: TransportRequestOptions): Promise export default async function InfoApi (this: That, params?: T.InfoRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] + const { + path: acceptedPath + } = acceptedParams.info + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} diff --git a/src/api/api/ingest.ts b/src/api/api/ingest.ts index 51ad39aff..e4a3c6c84 100644 --- a/src/api/api/ingest.ts +++ b/src/api/api/ingest.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. 
and contributors + * SPDX-License-Identifier: Apache-2.0 */ /* eslint-disable import/export */ @@ -35,23 +21,156 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -interface That { transport: Transport } + +interface That { + transport: Transport + acceptedParams: Record +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] export default class Ingest { transport: Transport + acceptedParams: Record constructor (transport: Transport) { this.transport = transport + this.acceptedParams = { + 'ingest.delete_geoip_database': { + path: [ + 'id' + ], + body: [], + query: [ + 'master_timeout', + 'timeout' + ] + }, + 'ingest.delete_ip_location_database': { + path: [ + 'id' + ], + body: [], + query: [ + 'master_timeout', + 'timeout' + ] + }, + 'ingest.delete_pipeline': { + path: [ + 'id' + ], + body: [], + query: [ + 'master_timeout', + 'timeout' + ] + }, + 'ingest.geo_ip_stats': { + path: [], + body: [], + query: [] + }, + 'ingest.get_geoip_database': { + path: [ + 'id' + ], + body: [], + query: [] + }, + 'ingest.get_ip_location_database': { + path: [ + 'id' + ], + body: [], + query: [ + 'master_timeout' + ] + }, + 'ingest.get_pipeline': { + path: [ + 'id' + ], + body: [], + query: [ + 'master_timeout', + 'summary' + ] + }, + 'ingest.processor_grok': { + path: [], + body: [], + query: [] + }, + 'ingest.put_geoip_database': { + path: [ + 'id' + ], + body: [ + 'name', + 'maxmind' + ], + query: [ + 'master_timeout', + 'timeout' + ] + }, + 'ingest.put_ip_location_database': { + path: [ + 'id' + ], + body: [ + 'configuration' + ], + query: [ + 'master_timeout', + 'timeout' + ] + }, + 'ingest.put_pipeline': { + path: [ + 'id' + ], + body: [ + '_meta', + 'description', + 'on_failure', + 'processors', + 'version', + 'deprecated' + ], + query: [ + 'master_timeout', + 'timeout', + 'if_version' + ] + }, + 'ingest.simulate': { + path: [ + 'id' + ], + body: [ + 'docs', + 'pipeline' + ], + query: [ + 'verbose' + ] + } + } } /** * Delete GeoIP database configurations. Delete one or more IP geolocation database configurations. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ingest-delete-geoip-database | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-ingest-delete-geoip-database | Elasticsearch API documentation} */ async deleteGeoipDatabase (this: That, params: T.IngestDeleteGeoipDatabaseRequest, options?: TransportRequestOptionsWithOutMeta): Promise async deleteGeoipDatabase (this: That, params: T.IngestDeleteGeoipDatabaseRequest, options?: TransportRequestOptionsWithMeta): Promise> async deleteGeoipDatabase (this: That, params: T.IngestDeleteGeoipDatabaseRequest, options?: TransportRequestOptions): Promise async deleteGeoipDatabase (this: That, params: T.IngestDeleteGeoipDatabaseRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id'] + const { + path: acceptedPath + } = this.acceptedParams['ingest.delete_geoip_database'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -87,13 +206,16 @@ export default class Ingest { /** * Delete IP geolocation database configurations. 
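Per the `acceptedParams` descriptors above, `master_timeout` and `timeout` are the only declared query parameters for the delete operations, so a call like the following serializes both onto the querystring and sends no request body:

```ts
// Both timeout values land in the querystring per the 'ingest.delete_pipeline' descriptor.
await client.ingest.deletePipeline({
  id: 'my-pipeline',
  master_timeout: '30s',
  timeout: '30s'
})
```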
- * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ingest-delete-ip-location-database | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-ingest-delete-ip-location-database | Elasticsearch API documentation} */ async deleteIpLocationDatabase (this: That, params: T.IngestDeleteIpLocationDatabaseRequest, options?: TransportRequestOptionsWithOutMeta): Promise async deleteIpLocationDatabase (this: That, params: T.IngestDeleteIpLocationDatabaseRequest, options?: TransportRequestOptionsWithMeta): Promise> async deleteIpLocationDatabase (this: That, params: T.IngestDeleteIpLocationDatabaseRequest, options?: TransportRequestOptions): Promise async deleteIpLocationDatabase (this: That, params: T.IngestDeleteIpLocationDatabaseRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id'] + const { + path: acceptedPath + } = this.acceptedParams['ingest.delete_ip_location_database'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -129,13 +251,16 @@ export default class Ingest { /** * Delete pipelines. Delete one or more ingest pipelines. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ingest-delete-pipeline | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-ingest-delete-pipeline | Elasticsearch API documentation} */ async deletePipeline (this: That, params: T.IngestDeletePipelineRequest, options?: TransportRequestOptionsWithOutMeta): Promise async deletePipeline (this: That, params: T.IngestDeletePipelineRequest, options?: TransportRequestOptionsWithMeta): Promise> async deletePipeline (this: That, params: T.IngestDeletePipelineRequest, options?: TransportRequestOptions): Promise async deletePipeline (this: That, params: T.IngestDeletePipelineRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id'] + const { + path: acceptedPath + } = this.acceptedParams['ingest.delete_pipeline'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -171,13 +296,16 @@ export default class Ingest { /** * Get GeoIP statistics. Get download statistics for GeoIP2 databases that are used with the GeoIP processor. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/geoip-processor.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/reference/enrich-processor/geoip-processor | Elasticsearch API documentation} */ async geoIpStats (this: That, params?: T.IngestGeoIpStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async geoIpStats (this: That, params?: T.IngestGeoIpStatsRequest, options?: TransportRequestOptionsWithMeta): Promise> async geoIpStats (this: That, params?: T.IngestGeoIpStatsRequest, options?: TransportRequestOptions): Promise async geoIpStats (this: That, params?: T.IngestGeoIpStatsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] + const { + path: acceptedPath + } = this.acceptedParams['ingest.geo_ip_stats'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -211,13 +339,16 @@ export default class Ingest { /** * Get GeoIP database configurations. Get information about one or more IP geolocation database configurations. 
- * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ingest-get-geoip-database | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-ingest-get-geoip-database | Elasticsearch API documentation} */ async getGeoipDatabase (this: That, params?: T.IngestGetGeoipDatabaseRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getGeoipDatabase (this: That, params?: T.IngestGetGeoipDatabaseRequest, options?: TransportRequestOptionsWithMeta): Promise> async getGeoipDatabase (this: That, params?: T.IngestGetGeoipDatabaseRequest, options?: TransportRequestOptions): Promise async getGeoipDatabase (this: That, params?: T.IngestGetGeoipDatabaseRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id'] + const { + path: acceptedPath + } = this.acceptedParams['ingest.get_geoip_database'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -261,13 +392,16 @@ export default class Ingest { /** * Get IP geolocation database configurations. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ingest-get-ip-location-database | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-ingest-get-ip-location-database | Elasticsearch API documentation} */ async getIpLocationDatabase (this: That, params?: T.IngestGetIpLocationDatabaseRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getIpLocationDatabase (this: That, params?: T.IngestGetIpLocationDatabaseRequest, options?: TransportRequestOptionsWithMeta): Promise> async getIpLocationDatabase (this: That, params?: T.IngestGetIpLocationDatabaseRequest, options?: TransportRequestOptions): Promise async getIpLocationDatabase (this: That, params?: T.IngestGetIpLocationDatabaseRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id'] + const { + path: acceptedPath + } = this.acceptedParams['ingest.get_ip_location_database'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -311,13 +445,16 @@ export default class Ingest { /** * Get pipelines. Get information about one or more ingest pipelines. This API returns a local reference of the pipeline. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ingest-get-pipeline | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-ingest-get-pipeline | Elasticsearch API documentation} */ async getPipeline (this: That, params?: T.IngestGetPipelineRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getPipeline (this: That, params?: T.IngestGetPipelineRequest, options?: TransportRequestOptionsWithMeta): Promise> async getPipeline (this: That, params?: T.IngestGetPipelineRequest, options?: TransportRequestOptions): Promise async getPipeline (this: That, params?: T.IngestGetPipelineRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id'] + const { + path: acceptedPath + } = this.acceptedParams['ingest.get_pipeline'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -361,13 +498,16 @@ export default class Ingest { /** * Run a grok processor. 
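Likewise, `ingest.get_pipeline` declares `master_timeout` and `summary` as query parameters, so a summarized listing is requested as:

```ts
// summary: true returns pipeline metadata without the full processor definitions.
const pipelines = await client.ingest.getPipeline({ id: 'my-pipeline', summary: true })
```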
Extract structured fields out of a single text field within a document. You must choose which field to extract matched fields from, as well as the grok pattern you expect will match. A grok pattern is like a regular expression that supports aliased expressions that can be reused. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/grok-processor.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/reference/enrich-processor/grok-processor | Elasticsearch API documentation} */ async processorGrok (this: That, params?: T.IngestProcessorGrokRequest, options?: TransportRequestOptionsWithOutMeta): Promise async processorGrok (this: That, params?: T.IngestProcessorGrokRequest, options?: TransportRequestOptionsWithMeta): Promise> async processorGrok (this: That, params?: T.IngestProcessorGrokRequest, options?: TransportRequestOptions): Promise async processorGrok (this: That, params?: T.IngestProcessorGrokRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] + const { + path: acceptedPath + } = this.acceptedParams['ingest.processor_grok'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -401,14 +541,18 @@ export default class Ingest { /** * Create or update a GeoIP database configuration. Refer to the create or update IP geolocation database configuration API. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ingest-put-geoip-database | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-ingest-put-geoip-database | Elasticsearch API documentation} */ async putGeoipDatabase (this: That, params: T.IngestPutGeoipDatabaseRequest, options?: TransportRequestOptionsWithOutMeta): Promise async putGeoipDatabase (this: That, params: T.IngestPutGeoipDatabaseRequest, options?: TransportRequestOptionsWithMeta): Promise> async putGeoipDatabase (this: That, params: T.IngestPutGeoipDatabaseRequest, options?: TransportRequestOptions): Promise async putGeoipDatabase (this: That, params: T.IngestPutGeoipDatabaseRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id'] - const acceptedBody: string[] = ['name', 'maxmind'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['ingest.put_geoip_database'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -430,8 +574,14 @@ export default class Ingest { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -448,14 +598,18 @@ export default class Ingest { /** * Create or update an IP geolocation database configuration. 
- * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ingest-put-ip-location-database | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-ingest-put-ip-location-database | Elasticsearch API documentation} */ async putIpLocationDatabase (this: That, params: T.IngestPutIpLocationDatabaseRequest, options?: TransportRequestOptionsWithOutMeta): Promise async putIpLocationDatabase (this: That, params: T.IngestPutIpLocationDatabaseRequest, options?: TransportRequestOptionsWithMeta): Promise> async putIpLocationDatabase (this: That, params: T.IngestPutIpLocationDatabaseRequest, options?: TransportRequestOptions): Promise async putIpLocationDatabase (this: That, params: T.IngestPutIpLocationDatabaseRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id'] - const acceptedBody: string[] = ['configuration'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['ingest.put_ip_location_database'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -467,8 +621,14 @@ export default class Ingest { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -485,14 +645,18 @@ export default class Ingest { /** * Create or update a pipeline. Changes made using this API take effect immediately. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ingest.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/manage-data/ingest/transform-enrich/ingest-pipelines | Elasticsearch API documentation} */ async putPipeline (this: That, params: T.IngestPutPipelineRequest, options?: TransportRequestOptionsWithOutMeta): Promise async putPipeline (this: That, params: T.IngestPutPipelineRequest, options?: TransportRequestOptionsWithMeta): Promise> async putPipeline (this: That, params: T.IngestPutPipelineRequest, options?: TransportRequestOptions): Promise async putPipeline (this: That, params: T.IngestPutPipelineRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id'] - const acceptedBody: string[] = ['_meta', 'description', 'on_failure', 'processors', 'version', 'deprecated'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['ingest.put_pipeline'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -514,8 +678,14 @@ export default class Ingest { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -532,14 +702,18 @@ export default class Ingest { /** * Simulate a pipeline. Run an ingest pipeline against a set of provided documents. 
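A sketch of `putPipeline` using the body keys declared in the descriptor above (`description`, `processors`, `version`, `_meta`, `on_failure`, `deprecated`); the processors themselves are ordinary ingest processors:

```ts
await client.ingest.putPipeline({
  id: 'my-pipeline',
  description: 'Rename a field and trim whitespace',
  processors: [
    { rename: { field: 'hostname', target_field: 'host.name' } },
    { trim: { field: 'message' } }
  ],
  version: 1,
  _meta: { owner: 'search-team' }
})
```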
You can either specify an existing pipeline to use with the provided documents or supply a pipeline definition in the body of the request. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ingest-simulate | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-ingest-simulate | Elasticsearch API documentation} */ async simulate (this: That, params: T.IngestSimulateRequest, options?: TransportRequestOptionsWithOutMeta): Promise async simulate (this: That, params: T.IngestSimulateRequest, options?: TransportRequestOptionsWithMeta): Promise> async simulate (this: That, params: T.IngestSimulateRequest, options?: TransportRequestOptions): Promise async simulate (this: That, params: T.IngestSimulateRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id'] - const acceptedBody: string[] = ['docs', 'pipeline'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['ingest.simulate'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -561,8 +735,14 @@ export default class Ingest { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } diff --git a/src/api/api/knn_search.ts b/src/api/api/knn_search.ts index d1a319461..e745e11c4 100644 --- a/src/api/api/knn_search.ts +++ b/src/api/api/knn_search.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 */ /* eslint-disable import/export */ @@ -35,18 +21,33 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -interface That { transport: Transport } + +interface That { + transport: Transport +} + +const acceptedParams: Record = { + knn_search: { + path: [ + 'index' + ], + body: [], + query: [] + } +} /** - * Run a knn search. NOTE: The kNN search API has been replaced by the `knn` option in the search API. Perform a k-nearest neighbor (kNN) search on a dense_vector field and return the matching documents. Given a query vector, the API finds the k closest vectors and returns those documents as search hits. Elasticsearch uses the HNSW algorithm to support efficient kNN search. Like most kNN algorithms, HNSW is an approximate method that sacrifices result accuracy for improved search speed. 
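And simulating that pipeline against sample documents, with `docs` and `pipeline` going to the body and `verbose` to the querystring as declared above:

```ts
const result = await client.ingest.simulate({
  id: 'my-pipeline', // or pass an inline `pipeline` definition instead of an id
  verbose: true,
  docs: [
    { _index: 'logs', _id: '1', _source: { hostname: 'web-01', message: '  hello  ' } }
  ]
})
```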
This means the results returned are not always the true k closest neighbors. The kNN search API supports restricting the search using a filter. The search will return the top k documents that also match the filter query. A kNN search response has the exact same structure as a search API response. However, certain sections have a meaning specific to kNN search: * The document `_score` is determined by the similarity between the query and document vector. * The `hits.total` object contains the total number of nearest neighbor candidates considered, which is `num_candidates * num_shards`. The `hits.total.relation` will always be `eq`, indicating an exact value. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/knn-search-api.html | Elasticsearch API documentation} + * Performs a kNN search. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/9.0/search-search.html | Elasticsearch API documentation} */ -export default async function KnnSearchApi (this: That, params: T.KnnSearchRequest, options?: TransportRequestOptionsWithOutMeta): Promise> -export default async function KnnSearchApi (this: That, params: T.KnnSearchRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>> -export default async function KnnSearchApi (this: That, params: T.KnnSearchRequest, options?: TransportRequestOptions): Promise> -export default async function KnnSearchApi (this: That, params: T.KnnSearchRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] - const acceptedBody: string[] = ['_source', 'docvalue_fields', 'stored_fields', 'fields', 'filter', 'knn'] +export default async function KnnSearchApi (this: That, params?: T.TODO, options?: TransportRequestOptionsWithOutMeta): Promise +export default async function KnnSearchApi (this: That, params?: T.TODO, options?: TransportRequestOptionsWithMeta): Promise> +export default async function KnnSearchApi (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise +export default async function KnnSearchApi (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise { + const { + path: acceptedPath + } = acceptedParams.knn_search + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -60,15 +61,11 @@ export default async function KnnSearchApi (this: That, par } } + params = params ?? {} for (const key in params) { - if (acceptedBody.includes(key)) { - body = body ?? {} - // @ts-expect-error - body[key] = params[key] - } else if (acceptedPath.includes(key)) { + if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error querystring[key] = params[key] } } diff --git a/src/api/api/license.ts b/src/api/api/license.ts index b80733dd9..4f4a793b8 100644 --- a/src/api/api/license.ts +++ b/src/api/api/license.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 */ /* eslint-disable import/export */ @@ -35,23 +21,91 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -interface That { transport: Transport } + +interface That { + transport: Transport + acceptedParams: Record +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] export default class License { transport: Transport + acceptedParams: Record constructor (transport: Transport) { this.transport = transport + this.acceptedParams = { + 'license.delete': { + path: [], + body: [], + query: [ + 'master_timeout', + 'timeout' + ] + }, + 'license.get': { + path: [], + body: [], + query: [ + 'accept_enterprise', + 'local' + ] + }, + 'license.get_basic_status': { + path: [], + body: [], + query: [] + }, + 'license.get_trial_status': { + path: [], + body: [], + query: [] + }, + 'license.post': { + path: [], + body: [ + 'license', + 'licenses' + ], + query: [ + 'acknowledge', + 'master_timeout', + 'timeout' + ] + }, + 'license.post_start_basic': { + path: [], + body: [], + query: [ + 'acknowledge', + 'master_timeout', + 'timeout' + ] + }, + 'license.post_start_trial': { + path: [], + body: [], + query: [ + 'acknowledge', + 'type_query_string', + 'master_timeout' + ] + } + } } /** * Delete the license. When the license expires, your subscription level reverts to Basic. If the operator privileges feature is enabled, only operator users can use this API. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-license-delete | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-license-delete | Elasticsearch API documentation} */ async delete (this: That, params?: T.LicenseDeleteRequest, options?: TransportRequestOptionsWithOutMeta): Promise async delete (this: That, params?: T.LicenseDeleteRequest, options?: TransportRequestOptionsWithMeta): Promise> async delete (this: That, params?: T.LicenseDeleteRequest, options?: TransportRequestOptions): Promise async delete (this: That, params?: T.LicenseDeleteRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] + const { + path: acceptedPath + } = this.acceptedParams['license.delete'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -85,13 +139,16 @@ export default class License { /** * Get license information. Get information about your Elastic license including its type, its status, when it was issued, and when it expires. >info > If the master node is generating a new cluster state, the get license API may return a `404 Not Found` response. > If you receive an unexpected 404 response after cluster startup, wait a short period and retry the request. 
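The removed doc comment above notes that the standalone kNN search API has been superseded by the `knn` option of the regular search API, which is the supported pattern going forward (vector values below are placeholders):

```ts
// Approximate kNN via the search API instead of the deprecated knnSearch helper.
const hits = await client.search({
  index: 'my-vectors',
  knn: {
    field: 'embedding',
    query_vector: [0.12, -0.53, 0.91],
    k: 10,
    num_candidates: 100
  },
  _source: ['title']
})
```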
- * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-license-get | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-license-get | Elasticsearch API documentation} */ async get (this: That, params?: T.LicenseGetRequest, options?: TransportRequestOptionsWithOutMeta): Promise async get (this: That, params?: T.LicenseGetRequest, options?: TransportRequestOptionsWithMeta): Promise> async get (this: That, params?: T.LicenseGetRequest, options?: TransportRequestOptions): Promise async get (this: That, params?: T.LicenseGetRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] + const { + path: acceptedPath + } = this.acceptedParams['license.get'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -125,13 +182,16 @@ export default class License { /** * Get the basic license status. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-license-get-basic-status | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-license-get-basic-status | Elasticsearch API documentation} */ async getBasicStatus (this: That, params?: T.LicenseGetBasicStatusRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getBasicStatus (this: That, params?: T.LicenseGetBasicStatusRequest, options?: TransportRequestOptionsWithMeta): Promise> async getBasicStatus (this: That, params?: T.LicenseGetBasicStatusRequest, options?: TransportRequestOptions): Promise async getBasicStatus (this: That, params?: T.LicenseGetBasicStatusRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] + const { + path: acceptedPath + } = this.acceptedParams['license.get_basic_status'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -165,13 +225,16 @@ export default class License { /** * Get the trial status. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-license-get-trial-status | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-license-get-trial-status | Elasticsearch API documentation} */ async getTrialStatus (this: That, params?: T.LicenseGetTrialStatusRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getTrialStatus (this: That, params?: T.LicenseGetTrialStatusRequest, options?: TransportRequestOptionsWithMeta): Promise> async getTrialStatus (this: That, params?: T.LicenseGetTrialStatusRequest, options?: TransportRequestOptions): Promise async getTrialStatus (this: That, params?: T.LicenseGetTrialStatusRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] + const { + path: acceptedPath + } = this.acceptedParams['license.get_trial_status'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -205,14 +268,18 @@ export default class License { /** * Update the license. You can update your license at runtime without shutting down your nodes. License updates take effect immediately. If the license you are installing does not support all of the features that were available with your previous license, however, you are notified in the response. 
You must then re-submit the API request with the acknowledge parameter set to true. NOTE: If Elasticsearch security features are enabled and you are installing a gold or higher license, you must enable TLS on the transport networking layer before you install the license. If the operator privileges feature is enabled, only operator users can use this API. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-license-post | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-license-post | Elasticsearch API documentation} */ async post (this: That, params?: T.LicensePostRequest, options?: TransportRequestOptionsWithOutMeta): Promise async post (this: That, params?: T.LicensePostRequest, options?: TransportRequestOptionsWithMeta): Promise> async post (this: That, params?: T.LicensePostRequest, options?: TransportRequestOptions): Promise async post (this: That, params?: T.LicensePostRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const acceptedBody: string[] = ['license', 'licenses'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['license.post'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -235,8 +302,14 @@ export default class License { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -250,13 +323,16 @@ export default class License { /** * Start a basic license. Start an indefinite basic license, which gives access to all the basic features. NOTE: In order to start a basic license, you must not currently have a basic license. If the basic license does not support all of the features that are available with your current license, however, you are notified in the response. You must then re-submit the API request with the `acknowledge` parameter set to `true`. To check the status of your basic license, use the get basic license API. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-license-post-start-basic | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-license-post-start-basic | Elasticsearch API documentation} */ async postStartBasic (this: That, params?: T.LicensePostStartBasicRequest, options?: TransportRequestOptionsWithOutMeta): Promise async postStartBasic (this: That, params?: T.LicensePostStartBasicRequest, options?: TransportRequestOptionsWithMeta): Promise> async postStartBasic (this: That, params?: T.LicensePostStartBasicRequest, options?: TransportRequestOptions): Promise async postStartBasic (this: That, params?: T.LicensePostStartBasicRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] + const { + path: acceptedPath + } = this.acceptedParams['license.post_start_basic'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -290,13 +366,16 @@ export default class License { /** * Start a trial. Start a 30-day trial, which gives access to all subscription features. 
NOTE: You are allowed to start a trial only if your cluster has not already activated a trial for the current major product version. For example, if you have already activated a trial for v8.0, you cannot start a new trial until v9.0. You can, however, request an extended trial at https://www.elastic.co/trialextension. To check the status of your trial, use the get trial status API. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-license-post-start-trial | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-license-post-start-trial | Elasticsearch API documentation} */ async postStartTrial (this: That, params?: T.LicensePostStartTrialRequest, options?: TransportRequestOptionsWithOutMeta): Promise async postStartTrial (this: That, params?: T.LicensePostStartTrialRequest, options?: TransportRequestOptionsWithMeta): Promise> async postStartTrial (this: That, params?: T.LicensePostStartTrialRequest, options?: TransportRequestOptions): Promise async postStartTrial (this: That, params?: T.LicensePostStartTrialRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] + const { + path: acceptedPath + } = this.acceptedParams['license.post_start_trial'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} diff --git a/src/api/api/logstash.ts b/src/api/api/logstash.ts index df33e03ac..e7acf0315 100644 --- a/src/api/api/logstash.ts +++ b/src/api/api/logstash.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 */ /* eslint-disable import/export */ @@ -35,23 +21,58 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -interface That { transport: Transport } + +interface That { + transport: Transport + acceptedParams: Record +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] export default class Logstash { transport: Transport + acceptedParams: Record constructor (transport: Transport) { this.transport = transport + this.acceptedParams = { + 'logstash.delete_pipeline': { + path: [ + 'id' + ], + body: [], + query: [] + }, + 'logstash.get_pipeline': { + path: [ + 'id' + ], + body: [], + query: [] + }, + 'logstash.put_pipeline': { + path: [ + 'id' + ], + body: [ + 'pipeline' + ], + query: [] + } + } } /** * Delete a Logstash pipeline. Delete a pipeline that is used for Logstash Central Management. If the request succeeds, you receive an empty response with an appropriate status code. 
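// Hedged sketch for the Logstash Central Management calls above, reusing the `client`
// from the kNN sketch; the pipeline id 'my-pipeline' is hypothetical.
const pipelines = await client.logstash.getPipeline({ id: 'my-pipeline' })
console.log(Object.keys(pipelines))                         // pipelines are keyed by id in the response
await client.logstash.deletePipeline({ id: 'my-pipeline' }) // empty body on success, per the docs above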
- * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-logstash-delete-pipeline | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-logstash-delete-pipeline | Elasticsearch API documentation} */ async deletePipeline (this: That, params: T.LogstashDeletePipelineRequest, options?: TransportRequestOptionsWithOutMeta): Promise async deletePipeline (this: That, params: T.LogstashDeletePipelineRequest, options?: TransportRequestOptionsWithMeta): Promise> async deletePipeline (this: That, params: T.LogstashDeletePipelineRequest, options?: TransportRequestOptions): Promise async deletePipeline (this: That, params: T.LogstashDeletePipelineRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id'] + const { + path: acceptedPath + } = this.acceptedParams['logstash.delete_pipeline'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -87,13 +108,16 @@ export default class Logstash { /** * Get Logstash pipelines. Get pipelines that are used for Logstash Central Management. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-logstash-get-pipeline | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-logstash-get-pipeline | Elasticsearch API documentation} */ async getPipeline (this: That, params?: T.LogstashGetPipelineRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getPipeline (this: That, params?: T.LogstashGetPipelineRequest, options?: TransportRequestOptionsWithMeta): Promise> async getPipeline (this: That, params?: T.LogstashGetPipelineRequest, options?: TransportRequestOptions): Promise async getPipeline (this: That, params?: T.LogstashGetPipelineRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id'] + const { + path: acceptedPath + } = this.acceptedParams['logstash.get_pipeline'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -137,14 +161,18 @@ export default class Logstash { /** * Create or update a Logstash pipeline. Create a pipeline that is used for Logstash Central Management. If the specified pipeline exists, it is replaced. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-logstash-put-pipeline | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-logstash-put-pipeline | Elasticsearch API documentation} */ async putPipeline (this: That, params: T.LogstashPutPipelineRequest, options?: TransportRequestOptionsWithOutMeta): Promise async putPipeline (this: That, params: T.LogstashPutPipelineRequest, options?: TransportRequestOptionsWithMeta): Promise> async putPipeline (this: That, params: T.LogstashPutPipelineRequest, options?: TransportRequestOptions): Promise async putPipeline (this: That, params: T.LogstashPutPipelineRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id'] - const acceptedBody: string[] = ['pipeline'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['logstash.put_pipeline'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -156,8 +184,14 @@ export default class Logstash { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } diff --git a/src/api/api/mget.ts b/src/api/api/mget.ts index c254d5fd8..fcd1f2eb1 100644 --- a/src/api/api/mget.ts +++ b/src/api/api/mget.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 */ /* eslint-disable import/export */ @@ -35,18 +21,50 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -interface That { transport: Transport } + +interface That { + transport: Transport +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] + +const acceptedParams: Record = { + mget: { + path: [ + 'index' + ], + body: [ + 'docs', + 'ids' + ], + query: [ + 'force_synthetic_source', + 'preference', + 'realtime', + 'refresh', + 'routing', + '_source', + '_source_excludes', + '_source_includes', + 'stored_fields' + ] + } +} /** * Get multiple documents. Get multiple JSON documents by ID from one or more indices. If you specify an index in the request URI, you only need to specify the document IDs in the request body. To ensure fast responses, this multi get (mget) API responds with partial results if one or more shards fail. **Filter source fields** By default, the `_source` field is returned for every document (if stored). Use the `_source` and `_source_include` or `source_exclude` attributes to filter what fields are returned for a particular document. You can include the `_source`, `_source_includes`, and `_source_excludes` query parameters in the request URI to specify the defaults to use when there are no per-document instructions. **Get stored fields** Use the `stored_fields` attribute to specify the set of stored fields you want to retrieve. Any requested fields that are not stored are ignored. You can include the `stored_fields` query parameter in the request URI to specify the defaults to use when there are no per-document instructions. 
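// Hedged mget sketch for the description above, reusing the `client` from the kNN sketch.
// The index name, ids and source filter are hypothetical.
const { docs } = await client.mget<{ title: string }>({
  index: 'my-index',
  ids: ['1', '2', '3'],
  _source_includes: ['title'] // request-level default when docs carry no per-document instructions
})
console.log(docs.length)      // partial results are still returned if some shards fail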
- * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-mget | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-mget | Elasticsearch API documentation} */ export default async function MgetApi (this: That, params?: T.MgetRequest, options?: TransportRequestOptionsWithOutMeta): Promise> export default async function MgetApi (this: That, params?: T.MgetRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>> export default async function MgetApi (this: That, params?: T.MgetRequest, options?: TransportRequestOptions): Promise> export default async function MgetApi (this: That, params?: T.MgetRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] - const acceptedBody: string[] = ['docs', 'ids'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = acceptedParams.mget + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -69,8 +87,14 @@ export default async function MgetApi (this: That, params?: } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } diff --git a/src/api/api/migration.ts b/src/api/api/migration.ts index 5ddf19b7d..fac6aedcd 100644 --- a/src/api/api/migration.ts +++ b/src/api/api/migration.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 */ /* eslint-disable import/export */ @@ -35,23 +21,50 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -interface That { transport: Transport } + +interface That { + transport: Transport + acceptedParams: Record +} export default class Migration { transport: Transport + acceptedParams: Record constructor (transport: Transport) { this.transport = transport + this.acceptedParams = { + 'migration.deprecations': { + path: [ + 'index' + ], + body: [], + query: [] + }, + 'migration.get_feature_upgrade_status': { + path: [], + body: [], + query: [] + }, + 'migration.post_feature_upgrade': { + path: [], + body: [], + query: [] + } + } } /** * Get deprecation information. Get information about different cluster, node, and index level settings that use deprecated features that will be removed or changed in the next major version. TIP: This APIs is designed for indirect use by the Upgrade Assistant. 
You are strongly recommended to use the Upgrade Assistant. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-migration-deprecations | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-migration-deprecations | Elasticsearch API documentation} */ async deprecations (this: That, params?: T.MigrationDeprecationsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async deprecations (this: That, params?: T.MigrationDeprecationsRequest, options?: TransportRequestOptionsWithMeta): Promise> async deprecations (this: That, params?: T.MigrationDeprecationsRequest, options?: TransportRequestOptions): Promise async deprecations (this: That, params?: T.MigrationDeprecationsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] + const { + path: acceptedPath + } = this.acceptedParams['migration.deprecations'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -95,13 +108,16 @@ export default class Migration { /** * Get feature migration information. Version upgrades sometimes require changes to how features store configuration information and data in system indices. Check which features need to be migrated and the status of any migrations that are in progress. TIP: This API is designed for indirect use by the Upgrade Assistant. You are strongly recommended to use the Upgrade Assistant. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-migration-get-feature-upgrade-status | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-migration-get-feature-upgrade-status | Elasticsearch API documentation} */ async getFeatureUpgradeStatus (this: That, params?: T.MigrationGetFeatureUpgradeStatusRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getFeatureUpgradeStatus (this: That, params?: T.MigrationGetFeatureUpgradeStatusRequest, options?: TransportRequestOptionsWithMeta): Promise> async getFeatureUpgradeStatus (this: That, params?: T.MigrationGetFeatureUpgradeStatusRequest, options?: TransportRequestOptions): Promise async getFeatureUpgradeStatus (this: That, params?: T.MigrationGetFeatureUpgradeStatusRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] + const { + path: acceptedPath + } = this.acceptedParams['migration.get_feature_upgrade_status'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -135,13 +151,16 @@ export default class Migration { /** * Start the feature migration. Version upgrades sometimes require changes to how features store configuration information and data in system indices. This API starts the automatic migration process. Some functionality might be temporarily unavailable during the migration process. TIP: The API is designed for indirect use by the Upgrade Assistant. We strongly recommend you use the Upgrade Assistant. 
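// Hedged sketch of the feature-migration flow described above, reusing the `client`
// from the kNN sketch; the Upgrade Assistant remains the recommended front end.
const status = await client.migration.getFeatureUpgradeStatus()
if (status.migration_status === 'MIGRATION_NEEDED') {
  // starts the system-index migration; some functionality may be briefly unavailable
  await client.migration.postFeatureUpgrade()
}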
- * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-migration-get-feature-upgrade-status | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-migration-get-feature-upgrade-status | Elasticsearch API documentation} */ async postFeatureUpgrade (this: That, params?: T.MigrationPostFeatureUpgradeRequest, options?: TransportRequestOptionsWithOutMeta): Promise async postFeatureUpgrade (this: That, params?: T.MigrationPostFeatureUpgradeRequest, options?: TransportRequestOptionsWithMeta): Promise> async postFeatureUpgrade (this: That, params?: T.MigrationPostFeatureUpgradeRequest, options?: TransportRequestOptions): Promise async postFeatureUpgrade (this: That, params?: T.MigrationPostFeatureUpgradeRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] + const { + path: acceptedPath + } = this.acceptedParams['migration.post_feature_upgrade'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} diff --git a/src/api/api/ml.ts b/src/api/api/ml.ts index 282fc38a5..d4ef76e64 100644 --- a/src/api/api/ml.ts +++ b/src/api/api/ml.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. 
and contributors + * SPDX-License-Identifier: Apache-2.0 */ /* eslint-disable import/export */ @@ -35,23 +21,971 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -interface That { transport: Transport } + +interface That { + transport: Transport + acceptedParams: Record +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] export default class Ml { transport: Transport + acceptedParams: Record constructor (transport: Transport) { this.transport = transport + this.acceptedParams = { + 'ml.clear_trained_model_deployment_cache': { + path: [ + 'model_id' + ], + body: [], + query: [] + }, + 'ml.close_job': { + path: [ + 'job_id' + ], + body: [ + 'allow_no_match', + 'force', + 'timeout' + ], + query: [ + 'allow_no_match', + 'force', + 'timeout' + ] + }, + 'ml.delete_calendar': { + path: [ + 'calendar_id' + ], + body: [], + query: [] + }, + 'ml.delete_calendar_event': { + path: [ + 'calendar_id', + 'event_id' + ], + body: [], + query: [] + }, + 'ml.delete_calendar_job': { + path: [ + 'calendar_id', + 'job_id' + ], + body: [], + query: [] + }, + 'ml.delete_data_frame_analytics': { + path: [ + 'id' + ], + body: [], + query: [ + 'force', + 'timeout' + ] + }, + 'ml.delete_datafeed': { + path: [ + 'datafeed_id' + ], + body: [], + query: [ + 'force' + ] + }, + 'ml.delete_expired_data': { + path: [ + 'job_id' + ], + body: [ + 'requests_per_second', + 'timeout' + ], + query: [ + 'requests_per_second', + 'timeout' + ] + }, + 'ml.delete_filter': { + path: [ + 'filter_id' + ], + body: [], + query: [] + }, + 'ml.delete_forecast': { + path: [ + 'job_id', + 'forecast_id' + ], + body: [], + query: [ + 'allow_no_forecasts', + 'timeout' + ] + }, + 'ml.delete_job': { + path: [ + 'job_id' + ], + body: [], + query: [ + 'force', + 'delete_user_annotations', + 'wait_for_completion' + ] + }, + 'ml.delete_model_snapshot': { + path: [ + 'job_id', + 'snapshot_id' + ], + body: [], + query: [] + }, + 'ml.delete_trained_model': { + path: [ + 'model_id' + ], + body: [], + query: [ + 'force', + 'timeout' + ] + }, + 'ml.delete_trained_model_alias': { + path: [ + 'model_alias', + 'model_id' + ], + body: [], + query: [] + }, + 'ml.estimate_model_memory': { + path: [], + body: [ + 'analysis_config', + 'max_bucket_cardinality', + 'overall_cardinality' + ], + query: [] + }, + 'ml.evaluate_data_frame': { + path: [], + body: [ + 'evaluation', + 'index', + 'query' + ], + query: [] + }, + 'ml.explain_data_frame_analytics': { + path: [ + 'id' + ], + body: [ + 'source', + 'dest', + 'analysis', + 'description', + 'model_memory_limit', + 'max_num_threads', + 'analyzed_fields', + 'allow_lazy_start' + ], + query: [] + }, + 'ml.flush_job': { + path: [ + 'job_id' + ], + body: [ + 'advance_time', + 'calc_interim', + 'end', + 'skip_time', + 'start' + ], + query: [ + 'advance_time', + 'calc_interim', + 'end', + 'skip_time', + 'start' + ] + }, + 'ml.forecast': { + path: [ + 'job_id' + ], + body: [ + 'duration', + 'expires_in', + 'max_model_memory' + ], + query: [ + 'duration', + 'expires_in', + 'max_model_memory' + ] + }, + 'ml.get_buckets': { + path: [ + 'job_id', + 'timestamp' + ], + body: [ + 'anomaly_score', + 'desc', + 'end', + 'exclude_interim', + 'expand', + 'page', + 'sort', + 'start' + ], + query: [ + 'anomaly_score', + 'desc', + 'end', + 'exclude_interim', + 'expand', + 'from', + 'size', + 'sort', + 'start' + ] + }, + 'ml.get_calendar_events': { + path: [ + 'calendar_id' + ], + body: [], + query: [ + 'end', + 'from', + 'job_id', + 'size', + 'start' + ] + }, + 
'ml.get_calendars': { + path: [ + 'calendar_id' + ], + body: [ + 'page' + ], + query: [ + 'from', + 'size' + ] + }, + 'ml.get_categories': { + path: [ + 'job_id', + 'category_id' + ], + body: [ + 'page' + ], + query: [ + 'from', + 'partition_field_value', + 'size' + ] + }, + 'ml.get_data_frame_analytics': { + path: [ + 'id' + ], + body: [], + query: [ + 'allow_no_match', + 'from', + 'size', + 'exclude_generated' + ] + }, + 'ml.get_data_frame_analytics_stats': { + path: [ + 'id' + ], + body: [], + query: [ + 'allow_no_match', + 'from', + 'size', + 'verbose' + ] + }, + 'ml.get_datafeed_stats': { + path: [ + 'datafeed_id' + ], + body: [], + query: [ + 'allow_no_match' + ] + }, + 'ml.get_datafeeds': { + path: [ + 'datafeed_id' + ], + body: [], + query: [ + 'allow_no_match', + 'exclude_generated' + ] + }, + 'ml.get_filters': { + path: [ + 'filter_id' + ], + body: [], + query: [ + 'from', + 'size' + ] + }, + 'ml.get_influencers': { + path: [ + 'job_id' + ], + body: [ + 'page' + ], + query: [ + 'desc', + 'end', + 'exclude_interim', + 'influencer_score', + 'from', + 'size', + 'sort', + 'start' + ] + }, + 'ml.get_job_stats': { + path: [ + 'job_id' + ], + body: [], + query: [ + 'allow_no_match' + ] + }, + 'ml.get_jobs': { + path: [ + 'job_id' + ], + body: [], + query: [ + 'allow_no_match', + 'exclude_generated' + ] + }, + 'ml.get_memory_stats': { + path: [ + 'node_id' + ], + body: [], + query: [ + 'master_timeout', + 'timeout' + ] + }, + 'ml.get_model_snapshot_upgrade_stats': { + path: [ + 'job_id', + 'snapshot_id' + ], + body: [], + query: [ + 'allow_no_match' + ] + }, + 'ml.get_model_snapshots': { + path: [ + 'job_id', + 'snapshot_id' + ], + body: [ + 'desc', + 'end', + 'page', + 'sort', + 'start' + ], + query: [ + 'desc', + 'end', + 'from', + 'size', + 'sort', + 'start' + ] + }, + 'ml.get_overall_buckets': { + path: [ + 'job_id' + ], + body: [ + 'allow_no_match', + 'bucket_span', + 'end', + 'exclude_interim', + 'overall_score', + 'start', + 'top_n' + ], + query: [ + 'allow_no_match', + 'bucket_span', + 'end', + 'exclude_interim', + 'overall_score', + 'start', + 'top_n' + ] + }, + 'ml.get_records': { + path: [ + 'job_id' + ], + body: [ + 'desc', + 'end', + 'exclude_interim', + 'page', + 'record_score', + 'sort', + 'start' + ], + query: [ + 'desc', + 'end', + 'exclude_interim', + 'from', + 'record_score', + 'size', + 'sort', + 'start' + ] + }, + 'ml.get_trained_models': { + path: [ + 'model_id' + ], + body: [], + query: [ + 'allow_no_match', + 'decompress_definition', + 'exclude_generated', + 'from', + 'include', + 'size', + 'tags' + ] + }, + 'ml.get_trained_models_stats': { + path: [ + 'model_id' + ], + body: [], + query: [ + 'allow_no_match', + 'from', + 'size' + ] + }, + 'ml.infer_trained_model': { + path: [ + 'model_id' + ], + body: [ + 'docs', + 'inference_config' + ], + query: [ + 'timeout' + ] + }, + 'ml.info': { + path: [], + body: [], + query: [] + }, + 'ml.open_job': { + path: [ + 'job_id' + ], + body: [ + 'timeout' + ], + query: [ + 'timeout' + ] + }, + 'ml.post_calendar_events': { + path: [ + 'calendar_id' + ], + body: [ + 'events' + ], + query: [] + }, + 'ml.post_data': { + path: [ + 'job_id' + ], + body: [ + 'data' + ], + query: [ + 'reset_end', + 'reset_start' + ] + }, + 'ml.preview_data_frame_analytics': { + path: [ + 'id' + ], + body: [ + 'config' + ], + query: [] + }, + 'ml.preview_datafeed': { + path: [ + 'datafeed_id' + ], + body: [ + 'datafeed_config', + 'job_config' + ], + query: [ + 'start', + 'end' + ] + }, + 'ml.put_calendar': { + path: [ + 'calendar_id' + ], + body: [ + 
'job_ids', + 'description' + ], + query: [] + }, + 'ml.put_calendar_job': { + path: [ + 'calendar_id', + 'job_id' + ], + body: [], + query: [] + }, + 'ml.put_data_frame_analytics': { + path: [ + 'id' + ], + body: [ + 'allow_lazy_start', + 'analysis', + 'analyzed_fields', + 'description', + 'dest', + 'max_num_threads', + '_meta', + 'model_memory_limit', + 'source', + 'headers', + 'version' + ], + query: [] + }, + 'ml.put_datafeed': { + path: [ + 'datafeed_id' + ], + body: [ + 'aggregations', + 'aggs', + 'chunking_config', + 'delayed_data_check_config', + 'frequency', + 'indices', + 'indexes', + 'indices_options', + 'job_id', + 'max_empty_searches', + 'query', + 'query_delay', + 'runtime_mappings', + 'script_fields', + 'scroll_size', + 'headers' + ], + query: [ + 'allow_no_indices', + 'expand_wildcards', + 'ignore_throttled', + 'ignore_unavailable' + ] + }, + 'ml.put_filter': { + path: [ + 'filter_id' + ], + body: [ + 'description', + 'items' + ], + query: [] + }, + 'ml.put_job': { + path: [], + body: [ + 'allow_lazy_open', + 'analysis_config', + 'analysis_limits', + 'background_persist_interval', + 'custom_settings', + 'daily_model_snapshot_retention_after_days', + 'data_description', + 'datafeed_config', + 'description', + 'job_id', + 'groups', + 'model_plot_config', + 'model_snapshot_retention_days', + 'renormalization_window_days', + 'results_index_name', + 'results_retention_days' + ], + query: [ + 'allow_no_indices', + 'expand_wildcards', + 'ignore_throttled', + 'ignore_unavailable' + ] + }, + 'ml.put_trained_model': { + path: [ + 'model_id' + ], + body: [ + 'compressed_definition', + 'definition', + 'description', + 'inference_config', + 'input', + 'metadata', + 'model_type', + 'model_size_bytes', + 'platform_architecture', + 'tags', + 'prefix_strings' + ], + query: [ + 'defer_definition_decompression', + 'wait_for_completion' + ] + }, + 'ml.put_trained_model_alias': { + path: [ + 'model_alias', + 'model_id' + ], + body: [], + query: [ + 'reassign' + ] + }, + 'ml.put_trained_model_definition_part': { + path: [ + 'model_id', + 'part' + ], + body: [ + 'definition', + 'total_definition_length', + 'total_parts' + ], + query: [] + }, + 'ml.put_trained_model_vocabulary': { + path: [ + 'model_id' + ], + body: [ + 'vocabulary', + 'merges', + 'scores' + ], + query: [] + }, + 'ml.reset_job': { + path: [ + 'job_id' + ], + body: [], + query: [ + 'wait_for_completion', + 'delete_user_annotations' + ] + }, + 'ml.revert_model_snapshot': { + path: [ + 'job_id', + 'snapshot_id' + ], + body: [ + 'delete_intervening_results' + ], + query: [ + 'delete_intervening_results' + ] + }, + 'ml.set_upgrade_mode': { + path: [], + body: [], + query: [ + 'enabled', + 'timeout' + ] + }, + 'ml.start_data_frame_analytics': { + path: [ + 'id' + ], + body: [], + query: [ + 'timeout' + ] + }, + 'ml.start_datafeed': { + path: [ + 'datafeed_id' + ], + body: [ + 'end', + 'start', + 'timeout' + ], + query: [ + 'end', + 'start', + 'timeout' + ] + }, + 'ml.start_trained_model_deployment': { + path: [ + 'model_id' + ], + body: [ + 'adaptive_allocations' + ], + query: [ + 'cache_size', + 'deployment_id', + 'number_of_allocations', + 'priority', + 'queue_capacity', + 'threads_per_allocation', + 'timeout', + 'wait_for' + ] + }, + 'ml.stop_data_frame_analytics': { + path: [ + 'id' + ], + body: [], + query: [ + 'allow_no_match', + 'force', + 'timeout' + ] + }, + 'ml.stop_datafeed': { + path: [ + 'datafeed_id' + ], + body: [ + 'allow_no_match', + 'force', + 'timeout' + ], + query: [ + 'allow_no_match', + 'force', + 'timeout' + ] + }, 
+ 'ml.stop_trained_model_deployment': { + path: [ + 'model_id' + ], + body: [], + query: [ + 'allow_no_match', + 'force' + ] + }, + 'ml.update_data_frame_analytics': { + path: [ + 'id' + ], + body: [ + 'description', + 'model_memory_limit', + 'max_num_threads', + 'allow_lazy_start' + ], + query: [] + }, + 'ml.update_datafeed': { + path: [ + 'datafeed_id' + ], + body: [ + 'aggregations', + 'chunking_config', + 'delayed_data_check_config', + 'frequency', + 'indices', + 'indexes', + 'indices_options', + 'job_id', + 'max_empty_searches', + 'query', + 'query_delay', + 'runtime_mappings', + 'script_fields', + 'scroll_size' + ], + query: [ + 'allow_no_indices', + 'expand_wildcards', + 'ignore_throttled', + 'ignore_unavailable' + ] + }, + 'ml.update_filter': { + path: [ + 'filter_id' + ], + body: [ + 'add_items', + 'description', + 'remove_items' + ], + query: [] + }, + 'ml.update_job': { + path: [ + 'job_id' + ], + body: [ + 'allow_lazy_open', + 'analysis_limits', + 'background_persist_interval', + 'custom_settings', + 'categorization_filters', + 'description', + 'model_plot_config', + 'model_prune_window', + 'daily_model_snapshot_retention_after_days', + 'model_snapshot_retention_days', + 'renormalization_window_days', + 'results_retention_days', + 'groups', + 'detectors', + 'per_partition_categorization' + ], + query: [] + }, + 'ml.update_model_snapshot': { + path: [ + 'job_id', + 'snapshot_id' + ], + body: [ + 'description', + 'retain' + ], + query: [] + }, + 'ml.update_trained_model_deployment': { + path: [ + 'model_id' + ], + body: [ + 'number_of_allocations', + 'adaptive_allocations' + ], + query: [ + 'number_of_allocations' + ] + }, + 'ml.upgrade_job_snapshot': { + path: [ + 'job_id', + 'snapshot_id' + ], + body: [], + query: [ + 'wait_for_completion', + 'timeout' + ] + }, + 'ml.validate': { + path: [], + body: [ + 'job_id', + 'analysis_config', + 'analysis_limits', + 'data_description', + 'description', + 'model_plot', + 'model_snapshot_id', + 'model_snapshot_retention_days', + 'results_index_name' + ], + query: [] + }, + 'ml.validate_detector': { + path: [], + body: [ + 'detector' + ], + query: [] + } + } } /** * Clear trained model deployment cache. Cache will be cleared on all nodes where the trained model is assigned. A trained model deployment may have an inference cache enabled. As requests are handled by each allocated node, their responses may be cached on that individual node. Calling this API clears the caches without restarting the deployment. 
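// Hedged sketch for the cache-clearing call described above, reusing the `client`
// from the kNN sketch; 'my-elser-model' is a hypothetical deployed model id.
await client.ml.clearTrainedModelDeploymentCache({ model_id: 'my-elser-model' })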
- * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-clear-trained-model-deployment-cache | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-ml-clear-trained-model-deployment-cache | Elasticsearch API documentation} */ async clearTrainedModelDeploymentCache (this: That, params: T.MlClearTrainedModelDeploymentCacheRequest, options?: TransportRequestOptionsWithOutMeta): Promise async clearTrainedModelDeploymentCache (this: That, params: T.MlClearTrainedModelDeploymentCacheRequest, options?: TransportRequestOptionsWithMeta): Promise> async clearTrainedModelDeploymentCache (this: That, params: T.MlClearTrainedModelDeploymentCacheRequest, options?: TransportRequestOptions): Promise async clearTrainedModelDeploymentCache (this: That, params: T.MlClearTrainedModelDeploymentCacheRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['model_id'] + const { + path: acceptedPath + } = this.acceptedParams['ml.clear_trained_model_deployment_cache'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -87,14 +1021,18 @@ export default class Ml { /** * Close anomaly detection jobs. A job can be opened and closed multiple times throughout its lifecycle. A closed job cannot receive data or perform analysis operations, but you can still explore and navigate results. When you close a job, it runs housekeeping tasks such as pruning the model history, flushing buffers, calculating final results and persisting the model snapshots. Depending upon the size of the job, it could take several minutes to close and the equivalent time to re-open. After it is closed, the job has a minimal overhead on the cluster except for maintaining its meta data. Therefore it is a best practice to close jobs that are no longer required to process data. If you close an anomaly detection job whose datafeed is running, the request first tries to stop the datafeed. This behavior is equivalent to calling stop datafeed API with the same timeout and force parameters as the close job request. When a datafeed that has a specified end date stops, it automatically closes its associated job. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-close-job | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-ml-close-job | Elasticsearch API documentation} */ async closeJob (this: That, params: T.MlCloseJobRequest, options?: TransportRequestOptionsWithOutMeta): Promise async closeJob (this: That, params: T.MlCloseJobRequest, options?: TransportRequestOptionsWithMeta): Promise> async closeJob (this: That, params: T.MlCloseJobRequest, options?: TransportRequestOptions): Promise async closeJob (this: That, params: T.MlCloseJobRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['job_id'] - const acceptedBody: string[] = ['allow_no_match', 'force', 'timeout'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['ml.close_job'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -116,8 +1054,14 @@ export default class Ml { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -134,13 +1078,16 @@ export default class Ml { /** * Delete a calendar. Remove all scheduled events from a calendar, then delete it. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-delete-calendar | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-ml-delete-calendar | Elasticsearch API documentation} */ async deleteCalendar (this: That, params: T.MlDeleteCalendarRequest, options?: TransportRequestOptionsWithOutMeta): Promise async deleteCalendar (this: That, params: T.MlDeleteCalendarRequest, options?: TransportRequestOptionsWithMeta): Promise> async deleteCalendar (this: That, params: T.MlDeleteCalendarRequest, options?: TransportRequestOptions): Promise async deleteCalendar (this: That, params: T.MlDeleteCalendarRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['calendar_id'] + const { + path: acceptedPath + } = this.acceptedParams['ml.delete_calendar'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -176,13 +1123,16 @@ export default class Ml { /** * Delete events from a calendar. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-delete-calendar-event | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-ml-delete-calendar-event | Elasticsearch API documentation} */ async deleteCalendarEvent (this: That, params: T.MlDeleteCalendarEventRequest, options?: TransportRequestOptionsWithOutMeta): Promise async deleteCalendarEvent (this: That, params: T.MlDeleteCalendarEventRequest, options?: TransportRequestOptionsWithMeta): Promise> async deleteCalendarEvent (this: That, params: T.MlDeleteCalendarEventRequest, options?: TransportRequestOptions): Promise async deleteCalendarEvent (this: That, params: T.MlDeleteCalendarEventRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['calendar_id', 'event_id'] + const { + path: acceptedPath + } = this.acceptedParams['ml.delete_calendar_event'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -219,13 +1169,16 @@ export default class Ml { /** * Delete anomaly jobs from a calendar. 
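// Standalone illustration (not part of the generated code) of the parameter-routing rule
// the regenerated methods above now share: keys that are not path parameters go on the
// query string only when they are declared query params or common query params, and
// everything else is folded into the request body. This is a simplified sketch; the
// helper name and shapes are illustrative.
const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty']

function routeParams (
  params: Record<string, unknown>,
  spec: { path: string[], body: string[], query: string[] }
): { body: Record<string, unknown>, querystring: Record<string, unknown> } {
  const body: Record<string, unknown> = {}
  const querystring: Record<string, unknown> = {}
  for (const key in params) {
    if (spec.path.includes(key) || key === 'body' || key === 'querystring') continue
    if (spec.query.includes(key) || commonQueryParams.includes(key)) {
      querystring[key] = params[key]
    } else {
      body[key] = params[key] // undeclared keys now default to the body, not the query string
    }
  }
  return { body, querystring }
}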
- * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-delete-calendar-job | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-ml-delete-calendar-job | Elasticsearch API documentation} */ async deleteCalendarJob (this: That, params: T.MlDeleteCalendarJobRequest, options?: TransportRequestOptionsWithOutMeta): Promise async deleteCalendarJob (this: That, params: T.MlDeleteCalendarJobRequest, options?: TransportRequestOptionsWithMeta): Promise> async deleteCalendarJob (this: That, params: T.MlDeleteCalendarJobRequest, options?: TransportRequestOptions): Promise async deleteCalendarJob (this: That, params: T.MlDeleteCalendarJobRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['calendar_id', 'job_id'] + const { + path: acceptedPath + } = this.acceptedParams['ml.delete_calendar_job'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -262,13 +1215,16 @@ export default class Ml { /** * Delete a data frame analytics job. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-delete-data-frame-analytics | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-ml-delete-data-frame-analytics | Elasticsearch API documentation} */ async deleteDataFrameAnalytics (this: That, params: T.MlDeleteDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async deleteDataFrameAnalytics (this: That, params: T.MlDeleteDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithMeta): Promise> async deleteDataFrameAnalytics (this: That, params: T.MlDeleteDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise async deleteDataFrameAnalytics (this: That, params: T.MlDeleteDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id'] + const { + path: acceptedPath + } = this.acceptedParams['ml.delete_data_frame_analytics'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -304,13 +1260,16 @@ export default class Ml { /** * Delete a datafeed. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-delete-datafeed | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-ml-delete-datafeed | Elasticsearch API documentation} */ async deleteDatafeed (this: That, params: T.MlDeleteDatafeedRequest, options?: TransportRequestOptionsWithOutMeta): Promise async deleteDatafeed (this: That, params: T.MlDeleteDatafeedRequest, options?: TransportRequestOptionsWithMeta): Promise> async deleteDatafeed (this: That, params: T.MlDeleteDatafeedRequest, options?: TransportRequestOptions): Promise async deleteDatafeed (this: That, params: T.MlDeleteDatafeedRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['datafeed_id'] + const { + path: acceptedPath + } = this.acceptedParams['ml.delete_datafeed'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -346,14 +1305,18 @@ export default class Ml { /** * Delete expired ML data. Delete all job results, model snapshots and forecast data that have exceeded their retention days period. 
Machine learning state documents that are not associated with any job are also deleted. You can limit the request to a single or set of anomaly detection jobs by using a job identifier, a group name, a comma-separated list of jobs, or a wildcard expression. You can delete expired data for all anomaly detection jobs by using `_all`, by specifying `*` as the ``, or by omitting the ``. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-delete-expired-data | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-ml-delete-expired-data | Elasticsearch API documentation} */ async deleteExpiredData (this: That, params?: T.MlDeleteExpiredDataRequest, options?: TransportRequestOptionsWithOutMeta): Promise async deleteExpiredData (this: That, params?: T.MlDeleteExpiredDataRequest, options?: TransportRequestOptionsWithMeta): Promise> async deleteExpiredData (this: That, params?: T.MlDeleteExpiredDataRequest, options?: TransportRequestOptions): Promise async deleteExpiredData (this: That, params?: T.MlDeleteExpiredDataRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['job_id'] - const acceptedBody: string[] = ['requests_per_second', 'timeout'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['ml.delete_expired_data'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -376,8 +1339,14 @@ export default class Ml { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -401,13 +1370,16 @@ export default class Ml { /** * Delete a filter. If an anomaly detection job references the filter, you cannot delete the filter. You must update or delete the job before you can delete the filter. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-delete-filter | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-ml-delete-filter | Elasticsearch API documentation} */ async deleteFilter (this: That, params: T.MlDeleteFilterRequest, options?: TransportRequestOptionsWithOutMeta): Promise async deleteFilter (this: That, params: T.MlDeleteFilterRequest, options?: TransportRequestOptionsWithMeta): Promise> async deleteFilter (this: That, params: T.MlDeleteFilterRequest, options?: TransportRequestOptions): Promise async deleteFilter (this: That, params: T.MlDeleteFilterRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['filter_id'] + const { + path: acceptedPath + } = this.acceptedParams['ml.delete_filter'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -443,13 +1415,16 @@ export default class Ml { /** * Delete forecasts from a job. By default, forecasts are retained for 14 days. You can specify a different retention period with the `expires_in` parameter in the forecast jobs API. The delete forecast API enables you to delete one or more forecasts before they expire. 
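// Hedged sketch for the delete-forecast call described above, reusing the `client` from
// the kNN sketch; '_all' removes every forecast for the (hypothetical) job 'my-job'.
await client.ml.deleteForecast({
  job_id: 'my-job',
  forecast_id: '_all',
  allow_no_forecasts: true // do not error if the job has no forecasts left
})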
- * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-delete-forecast | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-ml-delete-forecast | Elasticsearch API documentation} */ async deleteForecast (this: That, params: T.MlDeleteForecastRequest, options?: TransportRequestOptionsWithOutMeta): Promise async deleteForecast (this: That, params: T.MlDeleteForecastRequest, options?: TransportRequestOptionsWithMeta): Promise> async deleteForecast (this: That, params: T.MlDeleteForecastRequest, options?: TransportRequestOptions): Promise async deleteForecast (this: That, params: T.MlDeleteForecastRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['job_id', 'forecast_id'] + const { + path: acceptedPath + } = this.acceptedParams['ml.delete_forecast'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -493,13 +1468,16 @@ export default class Ml { /** * Delete an anomaly detection job. All job configuration, model state and results are deleted. It is not currently possible to delete multiple jobs using wildcards or a comma separated list. If you delete a job that has a datafeed, the request first tries to delete the datafeed. This behavior is equivalent to calling the delete datafeed API with the same timeout and force parameters as the delete job request. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-delete-job | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-ml-delete-job | Elasticsearch API documentation} */ async deleteJob (this: That, params: T.MlDeleteJobRequest, options?: TransportRequestOptionsWithOutMeta): Promise async deleteJob (this: That, params: T.MlDeleteJobRequest, options?: TransportRequestOptionsWithMeta): Promise> async deleteJob (this: That, params: T.MlDeleteJobRequest, options?: TransportRequestOptions): Promise async deleteJob (this: That, params: T.MlDeleteJobRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['job_id'] + const { + path: acceptedPath + } = this.acceptedParams['ml.delete_job'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -535,13 +1513,16 @@ export default class Ml { /** * Delete a model snapshot. You cannot delete the active model snapshot. To delete that snapshot, first revert to a different one. To identify the active model snapshot, refer to the `model_snapshot_id` in the results from the get jobs API. 
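// Hedged sketch for the snapshot-deletion note above, reusing the `client` from the kNN
// sketch: look up the active snapshot via the get jobs API and only delete other ones.
// The job and snapshot ids are hypothetical.
const { jobs } = await client.ml.getJobs({ job_id: 'my-job' })
const activeSnapshot = jobs[0]?.model_snapshot_id
if (activeSnapshot !== 'old-snapshot-id') {
  await client.ml.deleteModelSnapshot({ job_id: 'my-job', snapshot_id: 'old-snapshot-id' })
}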
- * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-delete-model-snapshot | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-ml-delete-model-snapshot | Elasticsearch API documentation} */ async deleteModelSnapshot (this: That, params: T.MlDeleteModelSnapshotRequest, options?: TransportRequestOptionsWithOutMeta): Promise async deleteModelSnapshot (this: That, params: T.MlDeleteModelSnapshotRequest, options?: TransportRequestOptionsWithMeta): Promise> async deleteModelSnapshot (this: That, params: T.MlDeleteModelSnapshotRequest, options?: TransportRequestOptions): Promise async deleteModelSnapshot (this: That, params: T.MlDeleteModelSnapshotRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['job_id', 'snapshot_id'] + const { + path: acceptedPath + } = this.acceptedParams['ml.delete_model_snapshot'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -578,13 +1559,16 @@ export default class Ml { /** * Delete an unreferenced trained model. The request deletes a trained inference model that is not referenced by an ingest pipeline. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-delete-trained-model | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-ml-delete-trained-model | Elasticsearch API documentation} */ async deleteTrainedModel (this: That, params: T.MlDeleteTrainedModelRequest, options?: TransportRequestOptionsWithOutMeta): Promise async deleteTrainedModel (this: That, params: T.MlDeleteTrainedModelRequest, options?: TransportRequestOptionsWithMeta): Promise> async deleteTrainedModel (this: That, params: T.MlDeleteTrainedModelRequest, options?: TransportRequestOptions): Promise async deleteTrainedModel (this: That, params: T.MlDeleteTrainedModelRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['model_id'] + const { + path: acceptedPath + } = this.acceptedParams['ml.delete_trained_model'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -620,13 +1604,16 @@ export default class Ml { /** * Delete a trained model alias. This API deletes an existing model alias that refers to a trained model. If the model alias is missing or refers to a model other than the one identified by the `model_id`, this API returns an error. 
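// Hedged sketch for the alias-deletion call described above, reusing the `client` from
// the kNN sketch; the call fails if 'champion' does not currently point at 'my-model'.
await client.ml.deleteTrainedModelAlias({ model_alias: 'champion', model_id: 'my-model' })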
- * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-delete-trained-model-alias | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-ml-delete-trained-model-alias | Elasticsearch API documentation} */ async deleteTrainedModelAlias (this: That, params: T.MlDeleteTrainedModelAliasRequest, options?: TransportRequestOptionsWithOutMeta): Promise async deleteTrainedModelAlias (this: That, params: T.MlDeleteTrainedModelAliasRequest, options?: TransportRequestOptionsWithMeta): Promise> async deleteTrainedModelAlias (this: That, params: T.MlDeleteTrainedModelAliasRequest, options?: TransportRequestOptions): Promise async deleteTrainedModelAlias (this: That, params: T.MlDeleteTrainedModelAliasRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['model_alias', 'model_id'] + const { + path: acceptedPath + } = this.acceptedParams['ml.delete_trained_model_alias'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -663,14 +1650,18 @@ export default class Ml { /** * Estimate job model memory usage. Make an estimation of the memory usage for an anomaly detection job model. The estimate is based on analysis configuration details for the job and cardinality estimates for the fields it references. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-estimate-model-memory | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-ml-estimate-model-memory | Elasticsearch API documentation} */ async estimateModelMemory (this: That, params?: T.MlEstimateModelMemoryRequest, options?: TransportRequestOptionsWithOutMeta): Promise async estimateModelMemory (this: That, params?: T.MlEstimateModelMemoryRequest, options?: TransportRequestOptionsWithMeta): Promise> async estimateModelMemory (this: That, params?: T.MlEstimateModelMemoryRequest, options?: TransportRequestOptions): Promise async estimateModelMemory (this: That, params?: T.MlEstimateModelMemoryRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const acceptedBody: string[] = ['analysis_config', 'max_bucket_cardinality', 'overall_cardinality'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['ml.estimate_model_memory'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -693,8 +1684,14 @@ export default class Ml { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -708,14 +1705,18 @@ export default class Ml { /** * Evaluate data frame analytics. The API packages together commonly used evaluation metrics for various types of machine learning features. This has been designed for use on indexes created by data frame analytics. Evaluation requires both a ground truth field and an analytics result field to be present. 
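The recurring change in these hunks (first visible in the ml.estimate_model_memory and ml.evaluate_data_frame methods above) is that each generated method now pulls its path/body/query key lists from a shared acceptedParams table and routes any leftover request property into either the querystring or the request body, instead of always treating it as a query parameter. A minimal standalone sketch of that routing decision follows; the helper itself and the exact contents of commonQueryParams are illustrative assumptions, not part of this diff.

    // Illustrative only: split a flat params object into querystring vs body,
    // mirroring the routing the generated methods use after this change.
    const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] // assumed list
    function splitParams (
      params: Record<string, unknown>,
      acceptedPath: string[],
      acceptedBody: string[],
      acceptedQuery: string[]
    ): { querystring: Record<string, unknown>, body: Record<string, unknown> } {
      const querystring: Record<string, unknown> = {}
      const body: Record<string, unknown> = {}
      for (const key of Object.keys(params)) {
        if (acceptedPath.includes(key)) continue                    // path params end up in the URL
        if (acceptedBody.includes(key)) { body[key] = params[key]; continue }
        if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
          querystring[key] = params[key]                            // known query parameter
        } else {
          body[key] = params[key]                                   // unknown keys now default to the body
        }
      }
      return { querystring, body }
    }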
- * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-evaluate-data-frame | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-ml-evaluate-data-frame | Elasticsearch API documentation} */ async evaluateDataFrame (this: That, params: T.MlEvaluateDataFrameRequest, options?: TransportRequestOptionsWithOutMeta): Promise async evaluateDataFrame (this: That, params: T.MlEvaluateDataFrameRequest, options?: TransportRequestOptionsWithMeta): Promise> async evaluateDataFrame (this: That, params: T.MlEvaluateDataFrameRequest, options?: TransportRequestOptions): Promise async evaluateDataFrame (this: That, params: T.MlEvaluateDataFrameRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const acceptedBody: string[] = ['evaluation', 'index', 'query'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['ml.evaluate_data_frame'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -737,8 +1738,14 @@ export default class Ml { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -752,14 +1759,18 @@ export default class Ml { /** * Explain data frame analytics config. This API provides explanations for a data frame analytics config that either exists already or one that has not been created yet. The following explanations are provided: * which fields are included or not in the analysis and why, * how much memory is estimated to be required. The estimate can be used when deciding the appropriate value for model_memory_limit setting later on. If you have object fields or fields that are excluded via source filtering, they are not included in the explanation. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-explain-data-frame-analytics | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-ml-explain-data-frame-analytics | Elasticsearch API documentation} */ async explainDataFrameAnalytics (this: That, params?: T.MlExplainDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async explainDataFrameAnalytics (this: That, params?: T.MlExplainDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithMeta): Promise> async explainDataFrameAnalytics (this: That, params?: T.MlExplainDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise async explainDataFrameAnalytics (this: That, params?: T.MlExplainDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id'] - const acceptedBody: string[] = ['source', 'dest', 'analysis', 'description', 'model_memory_limit', 'max_num_threads', 'analyzed_fields', 'allow_lazy_start'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['ml.explain_data_frame_analytics'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -782,8 +1793,14 @@ export default class Ml { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -807,14 +1824,18 @@ export default class Ml { /** * Force buffered data to be processed. The flush jobs API is only applicable when sending data for analysis using the post data API. Depending on the content of the buffer, it might additionally calculate new results. Both flush and close operations are similar; however, the flush is more efficient if you are expecting to send more data for analysis. When flushing, the job remains open and is available to continue analyzing data. A close operation additionally prunes and persists the model state to disk and the job must be opened again before analyzing further data. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-flush-job | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-ml-flush-job | Elasticsearch API documentation} */ async flushJob (this: That, params: T.MlFlushJobRequest, options?: TransportRequestOptionsWithOutMeta): Promise async flushJob (this: That, params: T.MlFlushJobRequest, options?: TransportRequestOptionsWithMeta): Promise> async flushJob (this: That, params: T.MlFlushJobRequest, options?: TransportRequestOptions): Promise async flushJob (this: That, params: T.MlFlushJobRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['job_id'] - const acceptedBody: string[] = ['advance_time', 'calc_interim', 'end', 'skip_time', 'start'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['ml.flush_job'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -836,8 +1857,14 @@ export default class Ml { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -854,14 +1881,18 @@ export default class Ml { /** * Predict future behavior of a time series. Forecasts are not supported for jobs that perform population analysis; an error occurs if you try to create a forecast for a job that has an `over_field_name` in its configuration. Forecasts predict future behavior based on historical data.
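As a quick illustration of the forecast API touched by this hunk, a hedged usage sketch; the node URL, job id, and durations are invented, and the call assumes an open anomaly detection job on a reachable cluster.

    import { Client } from '@elastic/elasticsearch'

    const client = new Client({ node: 'http://localhost:9200/' })

    // Ask for a 3-day forecast for a (hypothetical) open anomaly detection job.
    const resp = await client.ml.forecast({
      job_id: 'my-anomaly-job',   // hypothetical job id
      duration: '3d',
      expires_in: '7d'
    })
    console.log(resp.forecast_id)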
- * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-forecast | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-ml-forecast | Elasticsearch API documentation} */ async forecast (this: That, params: T.MlForecastRequest, options?: TransportRequestOptionsWithOutMeta): Promise async forecast (this: That, params: T.MlForecastRequest, options?: TransportRequestOptionsWithMeta): Promise> async forecast (this: That, params: T.MlForecastRequest, options?: TransportRequestOptions): Promise async forecast (this: That, params: T.MlForecastRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['job_id'] - const acceptedBody: string[] = ['duration', 'expires_in', 'max_model_memory'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['ml.forecast'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -883,8 +1914,14 @@ export default class Ml { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -901,14 +1938,18 @@ export default class Ml { /** * Get anomaly detection job results for buckets. The API presents a chronological view of the records, grouped by bucket. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-buckets | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-ml-get-buckets | Elasticsearch API documentation} */ async getBuckets (this: That, params: T.MlGetBucketsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getBuckets (this: That, params: T.MlGetBucketsRequest, options?: TransportRequestOptionsWithMeta): Promise> async getBuckets (this: That, params: T.MlGetBucketsRequest, options?: TransportRequestOptions): Promise async getBuckets (this: That, params: T.MlGetBucketsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['job_id', 'timestamp'] - const acceptedBody: string[] = ['anomaly_score', 'desc', 'end', 'exclude_interim', 'expand', 'page', 'sort', 'start'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['ml.get_buckets'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -930,8 +1971,14 @@ export default class Ml { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -956,13 +2003,16 @@ export default class Ml { /** * Get info about events in calendars. 
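A small usage sketch for the calendar events lookup described above; the calendar id and time window are invented, and `client` is the instance from the earlier sketch.

    // List events for one calendar within an assumed 30-day window.
    const events = await client.ml.getCalendarEvents({
      calendar_id: 'planned-outages',   // hypothetical calendar id
      start: 'now-30d',
      end: 'now'
    })
    console.log(events.count, events.events)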
- * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-calendar-events | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-ml-get-calendar-events | Elasticsearch API documentation} */ async getCalendarEvents (this: That, params: T.MlGetCalendarEventsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getCalendarEvents (this: That, params: T.MlGetCalendarEventsRequest, options?: TransportRequestOptionsWithMeta): Promise> async getCalendarEvents (this: That, params: T.MlGetCalendarEventsRequest, options?: TransportRequestOptions): Promise async getCalendarEvents (this: That, params: T.MlGetCalendarEventsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['calendar_id'] + const { + path: acceptedPath + } = this.acceptedParams['ml.get_calendar_events'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -998,14 +2048,18 @@ export default class Ml { /** * Get calendar configuration info. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-calendars | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-ml-get-calendars | Elasticsearch API documentation} */ async getCalendars (this: That, params?: T.MlGetCalendarsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getCalendars (this: That, params?: T.MlGetCalendarsRequest, options?: TransportRequestOptionsWithMeta): Promise> async getCalendars (this: That, params?: T.MlGetCalendarsRequest, options?: TransportRequestOptions): Promise async getCalendars (this: That, params?: T.MlGetCalendarsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['calendar_id'] - const acceptedBody: string[] = ['page'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['ml.get_calendars'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1028,8 +2082,14 @@ export default class Ml { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -1053,14 +2113,18 @@ export default class Ml { /** * Get anomaly detection job results for categories. 
- * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-categories | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-ml-get-categories | Elasticsearch API documentation} */ async getCategories (this: That, params: T.MlGetCategoriesRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getCategories (this: That, params: T.MlGetCategoriesRequest, options?: TransportRequestOptionsWithMeta): Promise> async getCategories (this: That, params: T.MlGetCategoriesRequest, options?: TransportRequestOptions): Promise async getCategories (this: That, params: T.MlGetCategoriesRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['job_id', 'category_id'] - const acceptedBody: string[] = ['page'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['ml.get_categories'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1082,8 +2146,14 @@ export default class Ml { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -1108,13 +2178,16 @@ export default class Ml { /** * Get data frame analytics job configuration info. You can get information for multiple data frame analytics jobs in a single API request by using a comma-separated list of data frame analytics jobs or a wildcard expression. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-data-frame-analytics | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-ml-get-data-frame-analytics | Elasticsearch API documentation} */ async getDataFrameAnalytics (this: That, params?: T.MlGetDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getDataFrameAnalytics (this: That, params?: T.MlGetDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithMeta): Promise> async getDataFrameAnalytics (this: That, params?: T.MlGetDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise async getDataFrameAnalytics (this: That, params?: T.MlGetDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id'] + const { + path: acceptedPath + } = this.acceptedParams['ml.get_data_frame_analytics'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1157,14 +2230,17 @@ export default class Ml { } /** - * Get data frame analytics jobs usage info. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-data-frame-analytics-stats | Elasticsearch API documentation} + * Get data frame analytics job stats. 
+ * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-ml-get-data-frame-analytics-stats | Elasticsearch API documentation} */ async getDataFrameAnalyticsStats (this: That, params?: T.MlGetDataFrameAnalyticsStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getDataFrameAnalyticsStats (this: That, params?: T.MlGetDataFrameAnalyticsStatsRequest, options?: TransportRequestOptionsWithMeta): Promise> async getDataFrameAnalyticsStats (this: That, params?: T.MlGetDataFrameAnalyticsStatsRequest, options?: TransportRequestOptions): Promise async getDataFrameAnalyticsStats (this: That, params?: T.MlGetDataFrameAnalyticsStatsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id'] + const { + path: acceptedPath + } = this.acceptedParams['ml.get_data_frame_analytics_stats'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1207,14 +2283,17 @@ export default class Ml { } /** - * Get datafeeds usage info. You can get statistics for multiple datafeeds in a single API request by using a comma-separated list of datafeeds or a wildcard expression. You can get statistics for all datafeeds by using `_all`, by specifying `*` as the `<feed_id>`, or by omitting the `<feed_id>`. If the datafeed is stopped, the only information you receive is the `datafeed_id` and the `state`. This API returns a maximum of 10,000 datafeeds. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-datafeed-stats | Elasticsearch API documentation} + * Get datafeed stats. You can get statistics for multiple datafeeds in a single API request by using a comma-separated list of datafeeds or a wildcard expression. You can get statistics for all datafeeds by using `_all`, by specifying `*` as the `<feed_id>`, or by omitting the `<feed_id>`. If the datafeed is stopped, the only information you receive is the `datafeed_id` and the `state`. This API returns a maximum of 10,000 datafeeds. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-ml-get-datafeed-stats | Elasticsearch API documentation} */ async getDatafeedStats (this: That, params?: T.MlGetDatafeedStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getDatafeedStats (this: That, params?: T.MlGetDatafeedStatsRequest, options?: TransportRequestOptionsWithMeta): Promise> async getDatafeedStats (this: That, params?: T.MlGetDatafeedStatsRequest, options?: TransportRequestOptions): Promise async getDatafeedStats (this: That, params?: T.MlGetDatafeedStatsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['datafeed_id'] + const { + path: acceptedPath + } = this.acceptedParams['ml.get_datafeed_stats'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1258,13 +2337,16 @@ export default class Ml { /** * Get datafeeds configuration info. You can get information for multiple datafeeds in a single API request by using a comma-separated list of datafeeds or a wildcard expression. You can get information for all datafeeds by using `_all`, by specifying `*` as the `<feed_id>`, or by omitting the `<feed_id>`. This API returns a maximum of 10,000 datafeeds.
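A hedged sketch of the datafeed lookups covered by these hunks; the datafeed id pattern is invented and `client` is the instance from the earlier sketch.

    // Configuration for datafeeds matching a pattern, plus stats for all datafeeds.
    const configs = await client.ml.getDatafeeds({ datafeed_id: 'datafeed-*' })
    const stats = await client.ml.getDatafeedStats()   // omitting datafeed_id means "all"
    console.log(configs.count, stats.datafeeds.length)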
- * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-datafeeds | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-ml-get-datafeeds | Elasticsearch API documentation} */ async getDatafeeds (this: That, params?: T.MlGetDatafeedsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getDatafeeds (this: That, params?: T.MlGetDatafeedsRequest, options?: TransportRequestOptionsWithMeta): Promise> async getDatafeeds (this: That, params?: T.MlGetDatafeedsRequest, options?: TransportRequestOptions): Promise async getDatafeeds (this: That, params?: T.MlGetDatafeedsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['datafeed_id'] + const { + path: acceptedPath + } = this.acceptedParams['ml.get_datafeeds'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1308,13 +2390,16 @@ export default class Ml { /** * Get filters. You can get a single filter or all filters. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-filters | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-ml-get-filters | Elasticsearch API documentation} */ async getFilters (this: That, params?: T.MlGetFiltersRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getFilters (this: That, params?: T.MlGetFiltersRequest, options?: TransportRequestOptionsWithMeta): Promise> async getFilters (this: That, params?: T.MlGetFiltersRequest, options?: TransportRequestOptions): Promise async getFilters (this: That, params?: T.MlGetFiltersRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['filter_id'] + const { + path: acceptedPath + } = this.acceptedParams['ml.get_filters'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1358,14 +2443,18 @@ export default class Ml { /** * Get anomaly detection job results for influencers. Influencers are the entities that have contributed to, or are to blame for, the anomalies. Influencer results are available only if an `influencer_field_name` is specified in the job configuration. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-influencers | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-ml-get-influencers | Elasticsearch API documentation} */ async getInfluencers (this: That, params: T.MlGetInfluencersRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getInfluencers (this: That, params: T.MlGetInfluencersRequest, options?: TransportRequestOptionsWithMeta): Promise> async getInfluencers (this: That, params: T.MlGetInfluencersRequest, options?: TransportRequestOptions): Promise async getInfluencers (this: That, params: T.MlGetInfluencersRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['job_id'] - const acceptedBody: string[] = ['page'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['ml.get_influencers'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -1387,8 +2476,14 @@ export default class Ml { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -1404,14 +2499,17 @@ export default class Ml { } /** - * Get anomaly detection jobs usage info. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-job-stats | Elasticsearch API documentation} + * Get anomaly detection job stats. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-ml-get-job-stats | Elasticsearch API documentation} */ async getJobStats (this: That, params?: T.MlGetJobStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getJobStats (this: That, params?: T.MlGetJobStatsRequest, options?: TransportRequestOptionsWithMeta): Promise> async getJobStats (this: That, params?: T.MlGetJobStatsRequest, options?: TransportRequestOptions): Promise async getJobStats (this: That, params?: T.MlGetJobStatsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['job_id'] + const { + path: acceptedPath + } = this.acceptedParams['ml.get_job_stats'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1455,13 +2553,16 @@ export default class Ml { /** * Get anomaly detection jobs configuration info. You can get information for multiple anomaly detection jobs in a single API request by using a group name, a comma-separated list of jobs, or a wildcard expression. You can get information for all anomaly detection jobs by using `_all`, by specifying `*` as the `<job_id>`, or by omitting the `<job_id>`. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-jobs | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-ml-get-jobs | Elasticsearch API documentation} */ async getJobs (this: That, params?: T.MlGetJobsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getJobs (this: That, params?: T.MlGetJobsRequest, options?: TransportRequestOptionsWithMeta): Promise> async getJobs (this: That, params?: T.MlGetJobsRequest, options?: TransportRequestOptions): Promise async getJobs (this: That, params?: T.MlGetJobsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['job_id'] + const { + path: acceptedPath + } = this.acceptedParams['ml.get_jobs'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1505,13 +2606,16 @@ export default class Ml { /** * Get machine learning memory usage info. Get information about how machine learning jobs and trained models are using memory, on each node, both within the JVM heap, and natively, outside of the JVM.
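For the memory stats endpoint described above, a one-call usage sketch; `client` is the instance from the earlier sketch.

    // Cluster-wide ML memory usage; pass node_id to narrow the scope to one node.
    const memory = await client.ml.getMemoryStats()
    console.log(Object.keys(memory.nodes))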
- * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-memory-stats | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-ml-get-memory-stats | Elasticsearch API documentation} */ async getMemoryStats (this: That, params?: T.MlGetMemoryStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getMemoryStats (this: That, params?: T.MlGetMemoryStatsRequest, options?: TransportRequestOptionsWithMeta): Promise> async getMemoryStats (this: That, params?: T.MlGetMemoryStatsRequest, options?: TransportRequestOptions): Promise async getMemoryStats (this: That, params?: T.MlGetMemoryStatsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['node_id'] + const { + path: acceptedPath + } = this.acceptedParams['ml.get_memory_stats'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1555,13 +2659,16 @@ export default class Ml { /** * Get anomaly detection job model snapshot upgrade usage info. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-model-snapshot-upgrade-stats | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-ml-get-model-snapshot-upgrade-stats | Elasticsearch API documentation} */ async getModelSnapshotUpgradeStats (this: That, params: T.MlGetModelSnapshotUpgradeStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getModelSnapshotUpgradeStats (this: That, params: T.MlGetModelSnapshotUpgradeStatsRequest, options?: TransportRequestOptionsWithMeta): Promise> async getModelSnapshotUpgradeStats (this: That, params: T.MlGetModelSnapshotUpgradeStatsRequest, options?: TransportRequestOptions): Promise async getModelSnapshotUpgradeStats (this: That, params: T.MlGetModelSnapshotUpgradeStatsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['job_id', 'snapshot_id'] + const { + path: acceptedPath + } = this.acceptedParams['ml.get_model_snapshot_upgrade_stats'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1598,14 +2705,18 @@ export default class Ml { /** * Get model snapshots info. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-model-snapshots | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-ml-get-model-snapshots | Elasticsearch API documentation} */ async getModelSnapshots (this: That, params: T.MlGetModelSnapshotsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getModelSnapshots (this: That, params: T.MlGetModelSnapshotsRequest, options?: TransportRequestOptionsWithMeta): Promise> async getModelSnapshots (this: That, params: T.MlGetModelSnapshotsRequest, options?: TransportRequestOptions): Promise async getModelSnapshots (this: That, params: T.MlGetModelSnapshotsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['job_id', 'snapshot_id'] - const acceptedBody: string[] = ['desc', 'end', 'page', 'sort', 'start'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['ml.get_model_snapshots'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -1627,8 +2738,14 @@ export default class Ml { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -1653,14 +2770,18 @@ export default class Ml { /** * Get overall bucket results. Retrieves overall bucket results that summarize the bucket results of multiple anomaly detection jobs. The `overall_score` is calculated by combining the scores of all the buckets within the overall bucket span. First, the maximum `anomaly_score` per anomaly detection job in the overall bucket is calculated. Then the `top_n` of those scores are averaged to result in the `overall_score`. This means that you can fine-tune the `overall_score` so that it is more or less sensitive to the number of jobs that detect an anomaly at the same time. For example, if you set `top_n` to `1`, the `overall_score` is the maximum bucket score in the overall bucket. Alternatively, if you set `top_n` to the number of jobs, the `overall_score` is high only when all jobs detect anomalies in that overall bucket. If you set the `bucket_span` parameter (to a value greater than its default), the `overall_score` is the maximum `overall_score` of the overall buckets that have a span equal to the jobs' largest bucket span. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-overall-buckets | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-ml-get-overall-buckets | Elasticsearch API documentation} */ async getOverallBuckets (this: That, params: T.MlGetOverallBucketsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getOverallBuckets (this: That, params: T.MlGetOverallBucketsRequest, options?: TransportRequestOptionsWithMeta): Promise> async getOverallBuckets (this: That, params: T.MlGetOverallBucketsRequest, options?: TransportRequestOptions): Promise async getOverallBuckets (this: That, params: T.MlGetOverallBucketsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['job_id'] - const acceptedBody: string[] = ['allow_no_match', 'bucket_span', 'end', 'exclude_interim', 'overall_score', 'start', 'top_n'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['ml.get_overall_buckets'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1682,8 +2803,14 @@ export default class Ml { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -1700,14 +2827,18 @@ export default class Ml { /** * Get anomaly records for an anomaly detection job. Records contain the detailed analytical results. They describe the anomalous activity that has been identified in the input data based on the detector configuration. There can be many anomaly records depending on the characteristics and size of the input data.
In practice, there are often too many to be able to manually process them. The machine learning features therefore perform a sophisticated aggregation of the anomaly records into buckets. The number of record results depends on the number of anomalies found in each bucket, which relates to the number of time series being modeled and the number of detectors. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-records | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-ml-get-records | Elasticsearch API documentation} */ async getRecords (this: That, params: T.MlGetRecordsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getRecords (this: That, params: T.MlGetRecordsRequest, options?: TransportRequestOptionsWithMeta): Promise> async getRecords (this: That, params: T.MlGetRecordsRequest, options?: TransportRequestOptions): Promise async getRecords (this: That, params: T.MlGetRecordsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['job_id'] - const acceptedBody: string[] = ['desc', 'end', 'exclude_interim', 'page', 'record_score', 'sort', 'start'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['ml.get_records'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1729,8 +2860,14 @@ export default class Ml { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -1747,13 +2884,16 @@ export default class Ml { /** * Get trained model configuration info. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-trained-models | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-ml-get-trained-models | Elasticsearch API documentation} */ async getTrainedModels (this: That, params?: T.MlGetTrainedModelsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getTrainedModels (this: That, params?: T.MlGetTrainedModelsRequest, options?: TransportRequestOptionsWithMeta): Promise> async getTrainedModels (this: That, params?: T.MlGetTrainedModelsRequest, options?: TransportRequestOptions): Promise async getTrainedModels (this: That, params?: T.MlGetTrainedModelsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['model_id'] + const { + path: acceptedPath + } = this.acceptedParams['ml.get_trained_models'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1797,13 +2937,16 @@ export default class Ml { /** * Get trained models usage info. You can get usage information for multiple trained models in a single API request by using a comma-separated list of model IDs or a wildcard expression. 
- * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-get-trained-models-stats | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-ml-get-trained-models-stats | Elasticsearch API documentation} */ async getTrainedModelsStats (this: That, params?: T.MlGetTrainedModelsStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getTrainedModelsStats (this: That, params?: T.MlGetTrainedModelsStatsRequest, options?: TransportRequestOptionsWithMeta): Promise> async getTrainedModelsStats (this: That, params?: T.MlGetTrainedModelsStatsRequest, options?: TransportRequestOptions): Promise async getTrainedModelsStats (this: That, params?: T.MlGetTrainedModelsStatsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['model_id'] + const { + path: acceptedPath + } = this.acceptedParams['ml.get_trained_models_stats'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1847,14 +2990,18 @@ export default class Ml { /** * Evaluate a trained model. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-infer-trained-model | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-ml-infer-trained-model | Elasticsearch API documentation} */ async inferTrainedModel (this: That, params: T.MlInferTrainedModelRequest, options?: TransportRequestOptionsWithOutMeta): Promise async inferTrainedModel (this: That, params: T.MlInferTrainedModelRequest, options?: TransportRequestOptionsWithMeta): Promise> async inferTrainedModel (this: That, params: T.MlInferTrainedModelRequest, options?: TransportRequestOptions): Promise async inferTrainedModel (this: That, params: T.MlInferTrainedModelRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['model_id'] - const acceptedBody: string[] = ['docs', 'inference_config'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['ml.infer_trained_model'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1876,8 +3023,14 @@ export default class Ml { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -1894,13 +3047,16 @@ export default class Ml { /** * Get machine learning information. Get defaults and limits used by machine learning. This endpoint is designed to be used by a user interface that needs to fully understand machine learning configurations where some options are not specified, meaning that the defaults should be used. This endpoint may be used to find out what those defaults are. It also provides information about the maximum size of machine learning jobs that could run in the current cluster configuration. 
- * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-info | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-ml-info | Elasticsearch API documentation} */ async info (this: That, params?: T.MlInfoRequest, options?: TransportRequestOptionsWithOutMeta): Promise async info (this: That, params?: T.MlInfoRequest, options?: TransportRequestOptionsWithMeta): Promise> async info (this: That, params?: T.MlInfoRequest, options?: TransportRequestOptions): Promise async info (this: That, params?: T.MlInfoRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] + const { + path: acceptedPath + } = this.acceptedParams['ml.info'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1934,14 +3090,18 @@ export default class Ml { /** * Open anomaly detection jobs. An anomaly detection job must be opened to be ready to receive and analyze data. It can be opened and closed multiple times throughout its lifecycle. When you open a new job, it starts with an empty model. When you open an existing job, the most recent model state is automatically loaded. The job is ready to resume its analysis from where it left off, once new data is received. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-open-job | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-ml-open-job | Elasticsearch API documentation} */ async openJob (this: That, params: T.MlOpenJobRequest, options?: TransportRequestOptionsWithOutMeta): Promise async openJob (this: That, params: T.MlOpenJobRequest, options?: TransportRequestOptionsWithMeta): Promise> async openJob (this: That, params: T.MlOpenJobRequest, options?: TransportRequestOptions): Promise async openJob (this: That, params: T.MlOpenJobRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['job_id'] - const acceptedBody: string[] = ['timeout'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['ml.open_job'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1963,8 +3123,14 @@ export default class Ml { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -1981,14 +3147,18 @@ export default class Ml { /** * Add scheduled events to the calendar. 
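A usage sketch for adding calendar events, matching the `events` body property this hunk now reads from `acceptedParams`; the calendar id is invented and the event field names follow the scheduled-event shape the API is believed to expect, so treat them as an assumption. `client` is the instance from the earlier sketch.

    // Add a one-day maintenance window to a (hypothetical) calendar.
    await client.ml.postCalendarEvents({
      calendar_id: 'planned-outages',
      events: [{
        description: 'quarterly maintenance',   // assumed event fields
        start_time: '2025-07-01T00:00:00Z',
        end_time: '2025-07-02T00:00:00Z'
      }]
    })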
- * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-post-calendar-events | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-ml-post-calendar-events | Elasticsearch API documentation} */ async postCalendarEvents (this: That, params: T.MlPostCalendarEventsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async postCalendarEvents (this: That, params: T.MlPostCalendarEventsRequest, options?: TransportRequestOptionsWithMeta): Promise> async postCalendarEvents (this: That, params: T.MlPostCalendarEventsRequest, options?: TransportRequestOptions): Promise async postCalendarEvents (this: That, params: T.MlPostCalendarEventsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['calendar_id'] - const acceptedBody: string[] = ['events'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['ml.post_calendar_events'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2010,8 +3180,14 @@ export default class Ml { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -2028,14 +3204,18 @@ export default class Ml { /** * Send data to an anomaly detection job for analysis. IMPORTANT: For each job, data can be accepted from only a single connection at a time. It is not currently possible to post data to multiple jobs using wildcards or a comma-separated list. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-post-data | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-ml-post-data | Elasticsearch API documentation} */ async postData (this: That, params: T.MlPostDataRequest, options?: TransportRequestOptionsWithOutMeta): Promise async postData (this: That, params: T.MlPostDataRequest, options?: TransportRequestOptionsWithMeta): Promise> async postData (this: That, params: T.MlPostDataRequest, options?: TransportRequestOptions): Promise async postData (this: That, params: T.MlPostDataRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['job_id'] - const acceptedBody: string[] = ['data'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['ml.post_data'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2047,8 +3227,14 @@ export default class Ml { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -2065,14 +3251,18 @@ export default class Ml { /** * Preview features used by data frame analytics. Preview the extracted features used by a data frame analytics config. 
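To make the preview call above concrete, a hedged sketch that previews an ad-hoc config rather than an existing job; the index name and analysis settings are invented, and `client` is the instance from the earlier sketch.

    // Preview the extracted features for an outlier-detection config without creating it.
    const preview = await client.ml.previewDataFrameAnalytics({
      config: {
        source: { index: 'my-source-index' },   // hypothetical index
        analysis: { outlier_detection: {} }
      }
    })
    console.log(preview.feature_values.slice(0, 3))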
- * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-preview-data-frame-analytics | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-ml-preview-data-frame-analytics | Elasticsearch API documentation} */ async previewDataFrameAnalytics (this: That, params?: T.MlPreviewDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async previewDataFrameAnalytics (this: That, params?: T.MlPreviewDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithMeta): Promise> async previewDataFrameAnalytics (this: That, params?: T.MlPreviewDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise async previewDataFrameAnalytics (this: That, params?: T.MlPreviewDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id'] - const acceptedBody: string[] = ['config'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['ml.preview_data_frame_analytics'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2095,8 +3285,14 @@ export default class Ml { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -2120,14 +3316,18 @@ export default class Ml { /** * Preview a datafeed. This API returns the first "page" of search results from a datafeed. You can preview an existing datafeed or provide configuration details for a datafeed and anomaly detection job in the API. The preview shows the structure of the data that will be passed to the anomaly detection engine. IMPORTANT: When Elasticsearch security features are enabled, the preview uses the credentials of the user that called the API. However, when the datafeed starts it uses the roles of the last user that created or updated the datafeed. To get a preview that accurately reflects the behavior of the datafeed, use the appropriate credentials. You can also use secondary authorization headers to supply the credentials. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-preview-datafeed | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-ml-preview-datafeed | Elasticsearch API documentation} */ async previewDatafeed (this: That, params?: T.MlPreviewDatafeedRequest, options?: TransportRequestOptionsWithOutMeta): Promise> async previewDatafeed (this: That, params?: T.MlPreviewDatafeedRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>> async previewDatafeed (this: That, params?: T.MlPreviewDatafeedRequest, options?: TransportRequestOptions): Promise> async previewDatafeed (this: That, params?: T.MlPreviewDatafeedRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['datafeed_id'] - const acceptedBody: string[] = ['datafeed_config', 'job_config'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['ml.preview_datafeed'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -2150,8 +3350,14 @@ export default class Ml { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -2175,14 +3381,18 @@ export default class Ml { /** * Create a calendar. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-put-calendar | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-ml-put-calendar | Elasticsearch API documentation} */ async putCalendar (this: That, params: T.MlPutCalendarRequest, options?: TransportRequestOptionsWithOutMeta): Promise async putCalendar (this: That, params: T.MlPutCalendarRequest, options?: TransportRequestOptionsWithMeta): Promise> async putCalendar (this: That, params: T.MlPutCalendarRequest, options?: TransportRequestOptions): Promise async putCalendar (this: That, params: T.MlPutCalendarRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['calendar_id'] - const acceptedBody: string[] = ['job_ids', 'description'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['ml.put_calendar'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2204,8 +3414,14 @@ export default class Ml { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -2222,13 +3438,16 @@ export default class Ml { /** * Add anomaly detection job to calendar. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-put-calendar-job | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-ml-put-calendar-job | Elasticsearch API documentation} */ async putCalendarJob (this: That, params: T.MlPutCalendarJobRequest, options?: TransportRequestOptionsWithOutMeta): Promise async putCalendarJob (this: That, params: T.MlPutCalendarJobRequest, options?: TransportRequestOptionsWithMeta): Promise> async putCalendarJob (this: That, params: T.MlPutCalendarJobRequest, options?: TransportRequestOptions): Promise async putCalendarJob (this: That, params: T.MlPutCalendarJobRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['calendar_id', 'job_id'] + const { + path: acceptedPath + } = this.acceptedParams['ml.put_calendar_job'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2265,14 +3484,18 @@ export default class Ml { /** * Create a data frame analytics job. This API creates a data frame analytics job that performs an analysis on the source indices and stores the outcome in a destination index. By default, the query used in the source configuration is `{"match_all": {}}`. If the destination index does not exist, it is created automatically when you start the job. 
If you supply only a subset of the regression or classification parameters, hyperparameter optimization occurs. It determines a value for each of the undefined parameters. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-put-data-frame-analytics | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-ml-put-data-frame-analytics | Elasticsearch API documentation} */ async putDataFrameAnalytics (this: That, params: T.MlPutDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async putDataFrameAnalytics (this: That, params: T.MlPutDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithMeta): Promise> async putDataFrameAnalytics (this: That, params: T.MlPutDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise async putDataFrameAnalytics (this: That, params: T.MlPutDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id'] - const acceptedBody: string[] = ['allow_lazy_start', 'analysis', 'analyzed_fields', 'description', 'dest', 'max_num_threads', '_meta', 'model_memory_limit', 'source', 'headers', 'version'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['ml.put_data_frame_analytics'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2294,8 +3517,14 @@ export default class Ml { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -2312,14 +3541,18 @@ export default class Ml { /** * Create a datafeed. Datafeeds retrieve data from Elasticsearch for analysis by an anomaly detection job. You can associate only one datafeed with each anomaly detection job. The datafeed contains a query that runs at a defined interval (`frequency`). If you are concerned about delayed data, you can add a delay (`query_delay`) at each interval. By default, the datafeed uses the following query: `{"match_all": {"boost": 1}}`. When Elasticsearch security features are enabled, your datafeed remembers which roles the user who created it had at the time of creation and runs the query using those same roles. If you provide secondary authorization headers, those credentials are used instead. You must use Kibana, this API, or the create anomaly detection jobs API to create a datafeed. Do not add a datafeed directly to the `.ml-config` index. Do not give users `write` privileges on the `.ml-config` index.
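A hedged creation sketch for the datafeed API described above, using body fields that appear in the `acceptedBody` list this hunk replaces; the datafeed id, job id, and index pattern are invented, and `client` is the instance from the earlier sketch.

    // Create a datafeed that feeds an (assumed) existing anomaly detection job.
    await client.ml.putDatafeed({
      datafeed_id: 'datafeed-my-anomaly-job',   // hypothetical ids and indices
      job_id: 'my-anomaly-job',
      indices: ['my-metrics-*'],
      query: { match_all: { boost: 1 } },
      query_delay: '120s',
      frequency: '150s',
      scroll_size: 1000
    })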
- * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-put-datafeed | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-ml-put-datafeed | Elasticsearch API documentation} */ async putDatafeed (this: That, params: T.MlPutDatafeedRequest, options?: TransportRequestOptionsWithOutMeta): Promise async putDatafeed (this: That, params: T.MlPutDatafeedRequest, options?: TransportRequestOptionsWithMeta): Promise> async putDatafeed (this: That, params: T.MlPutDatafeedRequest, options?: TransportRequestOptions): Promise async putDatafeed (this: That, params: T.MlPutDatafeedRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['datafeed_id'] - const acceptedBody: string[] = ['aggregations', 'aggs', 'chunking_config', 'delayed_data_check_config', 'frequency', 'indices', 'indexes', 'indices_options', 'job_id', 'max_empty_searches', 'query', 'query_delay', 'runtime_mappings', 'script_fields', 'scroll_size', 'headers'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['ml.put_datafeed'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2341,8 +3574,14 @@ export default class Ml { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -2359,14 +3598,18 @@ export default class Ml { /** * Create a filter. A filter contains a list of strings. It can be used by one or more anomaly detection jobs. Specifically, filters are referenced in the `custom_rules` property of detector configuration objects. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-put-filter | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-ml-put-filter | Elasticsearch API documentation} */ async putFilter (this: That, params: T.MlPutFilterRequest, options?: TransportRequestOptionsWithOutMeta): Promise async putFilter (this: That, params: T.MlPutFilterRequest, options?: TransportRequestOptionsWithMeta): Promise> async putFilter (this: That, params: T.MlPutFilterRequest, options?: TransportRequestOptions): Promise async putFilter (this: That, params: T.MlPutFilterRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['filter_id'] - const acceptedBody: string[] = ['description', 'items'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['ml.put_filter'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2388,8 +3631,14 @@ export default class Ml { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -2406,14 +3655,18 @@ export default class Ml { /** * Create an anomaly detection job. 
If you include a `datafeed_config`, you must have read index privileges on the source index. If you include a `datafeed_config` but do not provide a query, the datafeed uses `{"match_all": {"boost": 1}}`. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-put-job | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-ml-put-job | Elasticsearch API documentation} */ async putJob (this: That, params: T.MlPutJobRequest, options?: TransportRequestOptionsWithOutMeta): Promise async putJob (this: That, params: T.MlPutJobRequest, options?: TransportRequestOptionsWithMeta): Promise> async putJob (this: That, params: T.MlPutJobRequest, options?: TransportRequestOptions): Promise async putJob (this: That, params: T.MlPutJobRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const acceptedBody: string[] = ['allow_lazy_open', 'analysis_config', 'analysis_limits', 'background_persist_interval', 'custom_settings', 'daily_model_snapshot_retention_after_days', 'data_description', 'datafeed_config', 'description', 'job_id', 'groups', 'model_plot_config', 'model_snapshot_retention_days', 'renormalization_window_days', 'results_index_name', 'results_retention_days'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['ml.put_job'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2435,8 +3688,14 @@ export default class Ml { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -2453,14 +3712,18 @@ export default class Ml { /** * Create a trained model. Enable you to supply a trained model that is not created by data frame analytics. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-put-trained-model | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-ml-put-trained-model | Elasticsearch API documentation} */ async putTrainedModel (this: That, params: T.MlPutTrainedModelRequest, options?: TransportRequestOptionsWithOutMeta): Promise async putTrainedModel (this: That, params: T.MlPutTrainedModelRequest, options?: TransportRequestOptionsWithMeta): Promise> async putTrainedModel (this: That, params: T.MlPutTrainedModelRequest, options?: TransportRequestOptions): Promise async putTrainedModel (this: That, params: T.MlPutTrainedModelRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['model_id'] - const acceptedBody: string[] = ['compressed_definition', 'definition', 'description', 'inference_config', 'input', 'metadata', 'model_type', 'model_size_bytes', 'platform_architecture', 'tags', 'prefix_strings'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['ml.put_trained_model'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -2482,8 +3745,14 @@ export default class Ml { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -2500,13 +3769,16 @@ export default class Ml { /** * Create or update a trained model alias. A trained model alias is a logical name used to reference a single trained model. You can use aliases instead of trained model identifiers to make it easier to reference your models. For example, you can use aliases in inference aggregations and processors. An alias must be unique and refer to only a single trained model. However, you can have multiple aliases for each trained model. If you use this API to update an alias such that it references a different trained model ID and the model uses a different type of data frame analytics, an error occurs. For example, this situation occurs if you have a trained model for regression analysis and a trained model for classification analysis; you cannot reassign an alias from one type of trained model to another. If you use this API to update an alias and there are very few input fields in common between the old and new trained models for the model alias, the API returns a warning. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-put-trained-model-alias | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-ml-put-trained-model-alias | Elasticsearch API documentation} */ async putTrainedModelAlias (this: That, params: T.MlPutTrainedModelAliasRequest, options?: TransportRequestOptionsWithOutMeta): Promise async putTrainedModelAlias (this: That, params: T.MlPutTrainedModelAliasRequest, options?: TransportRequestOptionsWithMeta): Promise> async putTrainedModelAlias (this: That, params: T.MlPutTrainedModelAliasRequest, options?: TransportRequestOptions): Promise async putTrainedModelAlias (this: That, params: T.MlPutTrainedModelAliasRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['model_alias', 'model_id'] + const { + path: acceptedPath + } = this.acceptedParams['ml.put_trained_model_alias'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2543,14 +3815,18 @@ export default class Ml { /** * Create part of a trained model definition. 
- * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-put-trained-model-definition-part | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-ml-put-trained-model-definition-part | Elasticsearch API documentation} */ async putTrainedModelDefinitionPart (this: That, params: T.MlPutTrainedModelDefinitionPartRequest, options?: TransportRequestOptionsWithOutMeta): Promise async putTrainedModelDefinitionPart (this: That, params: T.MlPutTrainedModelDefinitionPartRequest, options?: TransportRequestOptionsWithMeta): Promise> async putTrainedModelDefinitionPart (this: That, params: T.MlPutTrainedModelDefinitionPartRequest, options?: TransportRequestOptions): Promise async putTrainedModelDefinitionPart (this: That, params: T.MlPutTrainedModelDefinitionPartRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['model_id', 'part'] - const acceptedBody: string[] = ['definition', 'total_definition_length', 'total_parts'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['ml.put_trained_model_definition_part'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2572,8 +3848,14 @@ export default class Ml { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -2591,14 +3873,18 @@ export default class Ml { /** * Create a trained model vocabulary. This API is supported only for natural language processing (NLP) models. The vocabulary is stored in the index as described in `inference_config.*.vocabulary` of the trained model definition. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-put-trained-model-vocabulary | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-ml-put-trained-model-vocabulary | Elasticsearch API documentation} */ async putTrainedModelVocabulary (this: That, params: T.MlPutTrainedModelVocabularyRequest, options?: TransportRequestOptionsWithOutMeta): Promise async putTrainedModelVocabulary (this: That, params: T.MlPutTrainedModelVocabularyRequest, options?: TransportRequestOptionsWithMeta): Promise> async putTrainedModelVocabulary (this: That, params: T.MlPutTrainedModelVocabularyRequest, options?: TransportRequestOptions): Promise async putTrainedModelVocabulary (this: That, params: T.MlPutTrainedModelVocabularyRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['model_id'] - const acceptedBody: string[] = ['vocabulary', 'merges', 'scores'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['ml.put_trained_model_vocabulary'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -2620,8 +3906,14 @@ export default class Ml { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -2638,13 +3930,16 @@ export default class Ml { /** * Reset an anomaly detection job. All model state and results are deleted. The job is ready to start over as if it had just been created. It is not currently possible to reset multiple jobs using wildcards or a comma separated list. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-reset-job | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-ml-reset-job | Elasticsearch API documentation} */ async resetJob (this: That, params: T.MlResetJobRequest, options?: TransportRequestOptionsWithOutMeta): Promise async resetJob (this: That, params: T.MlResetJobRequest, options?: TransportRequestOptionsWithMeta): Promise> async resetJob (this: That, params: T.MlResetJobRequest, options?: TransportRequestOptions): Promise async resetJob (this: That, params: T.MlResetJobRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['job_id'] + const { + path: acceptedPath + } = this.acceptedParams['ml.reset_job'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2680,14 +3975,18 @@ export default class Ml { /** * Revert to a snapshot. The machine learning features react quickly to anomalous input, learning new behaviors in data. Highly anomalous input increases the variance in the models whilst the system learns whether this is a new step-change in behavior or a one-off event. In the case where this anomalous input is known to be a one-off, then it might be appropriate to reset the model state to a time before this event. For example, you might consider reverting to a saved snapshot after Black Friday or a critical system failure. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-revert-model-snapshot | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-ml-revert-model-snapshot | Elasticsearch API documentation} */ async revertModelSnapshot (this: That, params: T.MlRevertModelSnapshotRequest, options?: TransportRequestOptionsWithOutMeta): Promise async revertModelSnapshot (this: That, params: T.MlRevertModelSnapshotRequest, options?: TransportRequestOptionsWithMeta): Promise> async revertModelSnapshot (this: That, params: T.MlRevertModelSnapshotRequest, options?: TransportRequestOptions): Promise async revertModelSnapshot (this: That, params: T.MlRevertModelSnapshotRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['job_id', 'snapshot_id'] - const acceptedBody: string[] = ['delete_intervening_results'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['ml.revert_model_snapshot'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -2709,8 +4008,14 @@ export default class Ml { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -2728,13 +4033,16 @@ export default class Ml { /** * Set upgrade_mode for ML indices. Sets a cluster wide upgrade_mode setting that prepares machine learning indices for an upgrade. When upgrading your cluster, in some circumstances you must restart your nodes and reindex your machine learning indices. In those circumstances, there must be no machine learning jobs running. You can close the machine learning jobs, do the upgrade, then open all the jobs again. Alternatively, you can use this API to temporarily halt tasks associated with the jobs and datafeeds and prevent new jobs from opening. You can also use this API during upgrades that do not require you to reindex your machine learning indices, though stopping jobs is not a requirement in that case. You can see the current value for the upgrade_mode setting by using the get machine learning info API. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-set-upgrade-mode | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-ml-set-upgrade-mode | Elasticsearch API documentation} */ async setUpgradeMode (this: That, params?: T.MlSetUpgradeModeRequest, options?: TransportRequestOptionsWithOutMeta): Promise async setUpgradeMode (this: That, params?: T.MlSetUpgradeModeRequest, options?: TransportRequestOptionsWithMeta): Promise> async setUpgradeMode (this: That, params?: T.MlSetUpgradeModeRequest, options?: TransportRequestOptions): Promise async setUpgradeMode (this: That, params?: T.MlSetUpgradeModeRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] + const { + path: acceptedPath + } = this.acceptedParams['ml.set_upgrade_mode'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2768,13 +4076,16 @@ export default class Ml { /** * Start a data frame analytics job. A data frame analytics job can be started and stopped multiple times throughout its lifecycle. If the destination index does not exist, it is created automatically the first time you start the data frame analytics job. The `index.number_of_shards` and `index.number_of_replicas` settings for the destination index are copied from the source index. If there are multiple source indices, the destination index copies the highest setting values. The mappings for the destination index are also copied from the source indices. If there are any mapping conflicts, the job fails to start. If the destination index exists, it is used as is. You can therefore set up the destination index in advance with custom settings and mappings. 
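Nothing changes for callers of the methods above: request properties are still passed flat on a single object, and the client decides per property whether it travels in the URL path, the query string, or the request body. A minimal usage sketch, assuming a locally reachable cluster (the node URL and calendar values below are placeholders):

import { Client } from '@elastic/elasticsearch'

// placeholder connection details
const client = new Client({ node: 'http://localhost:9200/' })

// calendar_id is a path parameter; job_ids and description are declared body
// properties for ml.put_calendar, so they are serialized into the request body.
await client.ml.putCalendar({
  calendar_id: 'planned-outages',
  job_ids: ['total-requests'],
  description: 'Known maintenance windows'
})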
- * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-start-data-frame-analytics | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-ml-start-data-frame-analytics | Elasticsearch API documentation} */ async startDataFrameAnalytics (this: That, params: T.MlStartDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async startDataFrameAnalytics (this: That, params: T.MlStartDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithMeta): Promise> async startDataFrameAnalytics (this: That, params: T.MlStartDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise async startDataFrameAnalytics (this: That, params: T.MlStartDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id'] + const { + path: acceptedPath + } = this.acceptedParams['ml.start_data_frame_analytics'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2810,14 +4121,18 @@ export default class Ml { /** * Start datafeeds. A datafeed must be started in order to retrieve data from Elasticsearch. A datafeed can be started and stopped multiple times throughout its lifecycle. Before you can start a datafeed, the anomaly detection job must be open. Otherwise, an error occurs. If you restart a stopped datafeed, it continues processing input data from the next millisecond after it was stopped. If new data was indexed for that exact millisecond between stopping and starting, it will be ignored. When Elasticsearch security features are enabled, your datafeed remembers which roles the last user to create or update it had at the time of creation or update and runs the query using those same roles. If you provided secondary authorization headers when you created or updated the datafeed, those credentials are used instead. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-start-datafeed | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-ml-start-datafeed | Elasticsearch API documentation} */ async startDatafeed (this: That, params: T.MlStartDatafeedRequest, options?: TransportRequestOptionsWithOutMeta): Promise async startDatafeed (this: That, params: T.MlStartDatafeedRequest, options?: TransportRequestOptionsWithMeta): Promise> async startDatafeed (this: That, params: T.MlStartDatafeedRequest, options?: TransportRequestOptions): Promise async startDatafeed (this: That, params: T.MlStartDatafeedRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['datafeed_id'] - const acceptedBody: string[] = ['end', 'start', 'timeout'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['ml.start_datafeed'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2839,8 +4154,14 @@ export default class Ml { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? 
{} + // @ts-expect-error + body[key] = params[key] + } } } @@ -2857,14 +4178,18 @@ export default class Ml { /** * Start a trained model deployment. It allocates the model to every machine learning node. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-start-trained-model-deployment | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-ml-start-trained-model-deployment | Elasticsearch API documentation} */ async startTrainedModelDeployment (this: That, params: T.MlStartTrainedModelDeploymentRequest, options?: TransportRequestOptionsWithOutMeta): Promise async startTrainedModelDeployment (this: That, params: T.MlStartTrainedModelDeploymentRequest, options?: TransportRequestOptionsWithMeta): Promise> async startTrainedModelDeployment (this: That, params: T.MlStartTrainedModelDeploymentRequest, options?: TransportRequestOptions): Promise async startTrainedModelDeployment (this: That, params: T.MlStartTrainedModelDeploymentRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['model_id'] - const acceptedBody: string[] = ['adaptive_allocations'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['ml.start_trained_model_deployment'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2886,8 +4211,14 @@ export default class Ml { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -2904,13 +4235,16 @@ export default class Ml { /** * Stop data frame analytics jobs. A data frame analytics job can be started and stopped multiple times throughout its lifecycle. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-stop-data-frame-analytics | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-ml-stop-data-frame-analytics | Elasticsearch API documentation} */ async stopDataFrameAnalytics (this: That, params: T.MlStopDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async stopDataFrameAnalytics (this: That, params: T.MlStopDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithMeta): Promise> async stopDataFrameAnalytics (this: That, params: T.MlStopDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise async stopDataFrameAnalytics (this: That, params: T.MlStopDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id'] + const { + path: acceptedPath + } = this.acceptedParams['ml.stop_data_frame_analytics'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2946,14 +4280,18 @@ export default class Ml { /** * Stop datafeeds. A datafeed that is stopped ceases to retrieve data from Elasticsearch. A datafeed can be started and stopped multiple times throughout its lifecycle. 
- * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-stop-datafeed | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-ml-stop-datafeed | Elasticsearch API documentation} */ async stopDatafeed (this: That, params: T.MlStopDatafeedRequest, options?: TransportRequestOptionsWithOutMeta): Promise async stopDatafeed (this: That, params: T.MlStopDatafeedRequest, options?: TransportRequestOptionsWithMeta): Promise> async stopDatafeed (this: That, params: T.MlStopDatafeedRequest, options?: TransportRequestOptions): Promise async stopDatafeed (this: That, params: T.MlStopDatafeedRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['datafeed_id'] - const acceptedBody: string[] = ['allow_no_match', 'force', 'timeout'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['ml.stop_datafeed'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2975,8 +4313,14 @@ export default class Ml { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -2993,13 +4337,16 @@ export default class Ml { /** * Stop a trained model deployment. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-stop-trained-model-deployment | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-ml-stop-trained-model-deployment | Elasticsearch API documentation} */ async stopTrainedModelDeployment (this: That, params: T.MlStopTrainedModelDeploymentRequest, options?: TransportRequestOptionsWithOutMeta): Promise async stopTrainedModelDeployment (this: That, params: T.MlStopTrainedModelDeploymentRequest, options?: TransportRequestOptionsWithMeta): Promise> async stopTrainedModelDeployment (this: That, params: T.MlStopTrainedModelDeploymentRequest, options?: TransportRequestOptions): Promise async stopTrainedModelDeployment (this: That, params: T.MlStopTrainedModelDeploymentRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['model_id'] + const { + path: acceptedPath + } = this.acceptedParams['ml.stop_trained_model_deployment'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -3035,14 +4382,18 @@ export default class Ml { /** * Update a data frame analytics job. 
- * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-update-data-frame-analytics | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-ml-update-data-frame-analytics | Elasticsearch API documentation} */ async updateDataFrameAnalytics (this: That, params: T.MlUpdateDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async updateDataFrameAnalytics (this: That, params: T.MlUpdateDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithMeta): Promise> async updateDataFrameAnalytics (this: That, params: T.MlUpdateDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise async updateDataFrameAnalytics (this: That, params: T.MlUpdateDataFrameAnalyticsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id'] - const acceptedBody: string[] = ['description', 'model_memory_limit', 'max_num_threads', 'allow_lazy_start'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['ml.update_data_frame_analytics'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -3064,8 +4415,14 @@ export default class Ml { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -3082,14 +4439,18 @@ export default class Ml { /** * Update a datafeed. You must stop and start the datafeed for the changes to be applied. When Elasticsearch security features are enabled, your datafeed remembers which roles the user who updated it had at the time of the update and runs the query using those same roles. If you provide secondary authorization headers, those credentials are used instead. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-update-datafeed | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-ml-update-datafeed | Elasticsearch API documentation} */ async updateDatafeed (this: That, params: T.MlUpdateDatafeedRequest, options?: TransportRequestOptionsWithOutMeta): Promise async updateDatafeed (this: That, params: T.MlUpdateDatafeedRequest, options?: TransportRequestOptionsWithMeta): Promise> async updateDatafeed (this: That, params: T.MlUpdateDatafeedRequest, options?: TransportRequestOptions): Promise async updateDatafeed (this: That, params: T.MlUpdateDatafeedRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['datafeed_id'] - const acceptedBody: string[] = ['aggregations', 'chunking_config', 'delayed_data_check_config', 'frequency', 'indices', 'indexes', 'indices_options', 'job_id', 'max_empty_searches', 'query', 'query_delay', 'runtime_mappings', 'script_fields', 'scroll_size'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['ml.update_datafeed'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -3111,8 +4472,14 @@ export default class Ml { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -3129,14 +4496,18 @@ export default class Ml { /** * Update a filter. Updates the description of a filter, adds items, or removes items from the list. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-update-filter | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-ml-update-filter | Elasticsearch API documentation} */ async updateFilter (this: That, params: T.MlUpdateFilterRequest, options?: TransportRequestOptionsWithOutMeta): Promise async updateFilter (this: That, params: T.MlUpdateFilterRequest, options?: TransportRequestOptionsWithMeta): Promise> async updateFilter (this: That, params: T.MlUpdateFilterRequest, options?: TransportRequestOptions): Promise async updateFilter (this: That, params: T.MlUpdateFilterRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['filter_id'] - const acceptedBody: string[] = ['add_items', 'description', 'remove_items'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['ml.update_filter'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -3158,8 +4529,14 @@ export default class Ml { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -3176,14 +4553,18 @@ export default class Ml { /** * Update an anomaly detection job. Updates certain properties of an anomaly detection job. 
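The practical effect of the recurring change above is the default for undeclared properties: previously any top-level property that was not a known body or path parameter was appended to the query string, whereas it now falls through to the request body unless it is declared in the method's query list or is one of the common error_trace/filter_path/human/pretty parameters. Continuing with the same assumed client and placeholder values:

// filter_id is a path parameter; add_items and description are declared body
// properties of ml.update_filter; human is a common query parameter and stays
// on the query string.
await client.ml.updateFilter({
  filter_id: 'safe-domains',
  add_items: ['example.com'],
  description: 'Domains excluded from anomaly results',
  human: true
})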
- * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-update-job | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-ml-update-job | Elasticsearch API documentation} */ async updateJob (this: That, params: T.MlUpdateJobRequest, options?: TransportRequestOptionsWithOutMeta): Promise async updateJob (this: That, params: T.MlUpdateJobRequest, options?: TransportRequestOptionsWithMeta): Promise> async updateJob (this: That, params: T.MlUpdateJobRequest, options?: TransportRequestOptions): Promise async updateJob (this: That, params: T.MlUpdateJobRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['job_id'] - const acceptedBody: string[] = ['allow_lazy_open', 'analysis_limits', 'background_persist_interval', 'custom_settings', 'categorization_filters', 'description', 'model_plot_config', 'model_prune_window', 'daily_model_snapshot_retention_after_days', 'model_snapshot_retention_days', 'renormalization_window_days', 'results_retention_days', 'groups', 'detectors', 'per_partition_categorization'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['ml.update_job'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -3205,8 +4586,14 @@ export default class Ml { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -3223,14 +4610,18 @@ export default class Ml { /** * Update a snapshot. Updates certain properties of a snapshot. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-update-model-snapshot | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-ml-update-model-snapshot | Elasticsearch API documentation} */ async updateModelSnapshot (this: That, params: T.MlUpdateModelSnapshotRequest, options?: TransportRequestOptionsWithOutMeta): Promise async updateModelSnapshot (this: That, params: T.MlUpdateModelSnapshotRequest, options?: TransportRequestOptionsWithMeta): Promise> async updateModelSnapshot (this: That, params: T.MlUpdateModelSnapshotRequest, options?: TransportRequestOptions): Promise async updateModelSnapshot (this: That, params: T.MlUpdateModelSnapshotRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['job_id', 'snapshot_id'] - const acceptedBody: string[] = ['description', 'retain'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['ml.update_model_snapshot'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -3252,8 +4643,14 @@ export default class Ml { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? 
{} + // @ts-expect-error + body[key] = params[key] + } } } @@ -3271,14 +4668,18 @@ export default class Ml { /** * Update a trained model deployment. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-update-trained-model-deployment | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-ml-update-trained-model-deployment | Elasticsearch API documentation} */ async updateTrainedModelDeployment (this: That, params: T.MlUpdateTrainedModelDeploymentRequest, options?: TransportRequestOptionsWithOutMeta): Promise async updateTrainedModelDeployment (this: That, params: T.MlUpdateTrainedModelDeploymentRequest, options?: TransportRequestOptionsWithMeta): Promise> async updateTrainedModelDeployment (this: That, params: T.MlUpdateTrainedModelDeploymentRequest, options?: TransportRequestOptions): Promise async updateTrainedModelDeployment (this: That, params: T.MlUpdateTrainedModelDeploymentRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['model_id'] - const acceptedBody: string[] = ['number_of_allocations', 'adaptive_allocations'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['ml.update_trained_model_deployment'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -3300,8 +4701,14 @@ export default class Ml { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -3318,13 +4725,16 @@ export default class Ml { /** * Upgrade a snapshot. Upgrade an anomaly detection model snapshot to the latest major version. Over time, older snapshot formats are deprecated and removed. Anomaly detection jobs support only snapshots that are from the current or previous major version. This API provides a means to upgrade a snapshot to the current major version. This aids in preparing the cluster for an upgrade to the next major version. Only one snapshot per anomaly detection job can be upgraded at a time and the upgraded snapshot cannot be the current snapshot of the anomaly detection job. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-upgrade-job-snapshot | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-ml-upgrade-job-snapshot | Elasticsearch API documentation} */ async upgradeJobSnapshot (this: That, params: T.MlUpgradeJobSnapshotRequest, options?: TransportRequestOptionsWithOutMeta): Promise async upgradeJobSnapshot (this: That, params: T.MlUpgradeJobSnapshotRequest, options?: TransportRequestOptionsWithMeta): Promise> async upgradeJobSnapshot (this: That, params: T.MlUpgradeJobSnapshotRequest, options?: TransportRequestOptions): Promise async upgradeJobSnapshot (this: That, params: T.MlUpgradeJobSnapshotRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['job_id', 'snapshot_id'] + const { + path: acceptedPath + } = this.acceptedParams['ml.upgrade_job_snapshot'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -3361,14 +4771,18 @@ export default class Ml { /** * Validate an anomaly detection job. - * @see {@link https://www.elastic.co/guide/en/machine-learning/master/ml-jobs.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/guide/en/machine-learning/9.0/ml-jobs.html | Elasticsearch API documentation} */ async validate (this: That, params?: T.MlValidateRequest, options?: TransportRequestOptionsWithOutMeta): Promise async validate (this: That, params?: T.MlValidateRequest, options?: TransportRequestOptionsWithMeta): Promise> async validate (this: That, params?: T.MlValidateRequest, options?: TransportRequestOptions): Promise async validate (this: That, params?: T.MlValidateRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const acceptedBody: string[] = ['job_id', 'analysis_config', 'analysis_limits', 'data_description', 'description', 'model_plot', 'model_snapshot_id', 'model_snapshot_retention_days', 'results_index_name'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['ml.validate'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -3391,8 +4805,14 @@ export default class Ml { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -3406,14 +4826,18 @@ export default class Ml { /** * Validate an anomaly detection job. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/ | Elasticsearch API documentation} */ async validateDetector (this: That, params: T.MlValidateDetectorRequest, options?: TransportRequestOptionsWithOutMeta): Promise async validateDetector (this: That, params: T.MlValidateDetectorRequest, options?: TransportRequestOptionsWithMeta): Promise> async validateDetector (this: That, params: T.MlValidateDetectorRequest, options?: TransportRequestOptions): Promise async validateDetector (this: That, params: T.MlValidateDetectorRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const acceptedBody: string[] = ['detector'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['ml.validate_detector'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -3425,8 +4849,14 @@ export default class Ml { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } diff --git a/src/api/api/monitoring.ts b/src/api/api/monitoring.ts index 053fea53a..d6114727e 100644 --- a/src/api/api/monitoring.ts +++ b/src/api/api/monitoring.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. 
See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 */ /* eslint-disable import/export */ @@ -35,24 +21,50 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -interface That { transport: Transport } + +interface That { + transport: Transport + acceptedParams: Record +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] export default class Monitoring { transport: Transport + acceptedParams: Record constructor (transport: Transport) { this.transport = transport + this.acceptedParams = { + 'monitoring.bulk': { + path: [ + 'type' + ], + body: [ + 'operations' + ], + query: [ + 'system_id', + 'system_api_version', + 'interval' + ] + } + } } /** * Send monitoring data. This API is used by the monitoring features to send monitoring data. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/ | Elasticsearch API documentation} */ async bulk (this: That, params: T.MonitoringBulkRequest, options?: TransportRequestOptionsWithOutMeta): Promise async bulk (this: That, params: T.MonitoringBulkRequest, options?: TransportRequestOptionsWithMeta): Promise> async bulk (this: That, params: T.MonitoringBulkRequest, options?: TransportRequestOptions): Promise async bulk (this: That, params: T.MonitoringBulkRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['type'] - const acceptedBody: string[] = ['operations'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['monitoring.bulk'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -64,8 +76,14 @@ export default class Monitoring { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } diff --git a/src/api/api/msearch.ts b/src/api/api/msearch.ts index 573c4f385..aa11fd77f 100644 --- a/src/api/api/msearch.ts +++ b/src/api/api/msearch.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 */ /* eslint-disable import/export */ @@ -35,18 +21,53 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -interface That { transport: Transport } + +interface That { + transport: Transport +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] + +const acceptedParams: Record = { + msearch: { + path: [ + 'index' + ], + body: [ + 'searches' + ], + query: [ + 'allow_no_indices', + 'ccs_minimize_roundtrips', + 'expand_wildcards', + 'ignore_throttled', + 'ignore_unavailable', + 'include_named_queries_score', + 'max_concurrent_searches', + 'max_concurrent_shard_requests', + 'pre_filter_shard_size', + 'rest_total_hits_as_int', + 'routing', + 'search_type', + 'typed_keys' + ] + } +} /** * Run multiple searches. The format of the request is similar to the bulk API format and makes use of the newline delimited JSON (NDJSON) format. The structure is as follows: ``` header\n body\n header\n body\n ``` This structure is specifically optimized to reduce parsing if a specific search ends up redirected to another node. IMPORTANT: The final line of data must end with a newline character `\n`. Each newline character may be preceded by a carriage return `\r`. When sending requests to this endpoint the `Content-Type` header should be set to `application/x-ndjson`. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-msearch | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-msearch | Elasticsearch API documentation} */ export default async function MsearchApi> (this: That, params: T.MsearchRequest, options?: TransportRequestOptionsWithOutMeta): Promise> export default async function MsearchApi> (this: That, params: T.MsearchRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>> export default async function MsearchApi> (this: That, params: T.MsearchRequest, options?: TransportRequestOptions): Promise> export default async function MsearchApi> (this: That, params: T.MsearchRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] - const acceptedBody: string[] = ['searches'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = acceptedParams.msearch + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -58,8 +79,14 @@ export default async function MsearchApi = { + msearch_template: { + path: [ + 'index' + ], + body: [ + 'search_templates' + ], + query: [ + 'ccs_minimize_roundtrips', + 'max_concurrent_searches', + 'search_type', + 'rest_total_hits_as_int', + 'typed_keys' + ] + } +} /** * Run multiple templated searches. Run multiple templated searches with a single request. If you are providing a text file or text input to `curl`, use the `--data-binary` flag instead of `-d` to preserve newlines. 
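On the wire this becomes the NDJSON header/body pairs described above; through the client, the pairs are passed as the `searches` body parameter. Continuing with the same assumed client and placeholder index names:

// Alternating header and body objects, exactly as in the NDJSON format:
// each header selects an index, each following object is a search body.
const responses = await client.msearch({
  searches: [
    { index: 'my-index' },
    { query: { match: { message: 'hello world' } }, from: 0, size: 10 },
    { index: 'my-other-index' },
    { query: { match_all: {} } }
  ]
})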
For example: ``` $ cat requests { "index": "my-index" } { "id": "my-search-template", "params": { "query_string": "hello world", "from": 0, "size": 10 }} { "index": "my-other-index" } { "id": "my-other-search-template", "params": { "query_type": "match_all" }} $ curl -H "Content-Type: application/x-ndjson" -XGET localhost:9200/_msearch/template --data-binary "@requests"; echo ``` - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-msearch-template | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-msearch-template | Elasticsearch API documentation} */ export default async function MsearchTemplateApi> (this: That, params: T.MsearchTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise> export default async function MsearchTemplateApi> (this: That, params: T.MsearchTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>> export default async function MsearchTemplateApi> (this: That, params: T.MsearchTemplateRequest, options?: TransportRequestOptions): Promise> export default async function MsearchTemplateApi> (this: That, params: T.MsearchTemplateRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] - const acceptedBody: string[] = ['search_templates'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = acceptedParams.msearch_template + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -58,8 +71,14 @@ export default async function MsearchTemplateApi = { + mtermvectors: { + path: [ + 'index' + ], + body: [ + 'docs', + 'ids' + ], + query: [ + 'ids', + 'fields', + 'field_statistics', + 'offsets', + 'payloads', + 'positions', + 'preference', + 'realtime', + 'routing', + 'term_statistics', + 'version', + 'version_type' + ] + } +} /** * Get multiple term vectors. Get multiple term vectors with a single request. You can specify existing documents by index and ID or provide artificial documents in the body of the request. You can specify the index in the request body or request URI. The response contains a `docs` array with all the fetched termvectors. Each element has the structure provided by the termvectors API. **Artificial documents** You can also use `mtermvectors` to generate term vectors for artificial documents provided in the body of the request. The mapping used is determined by the specified `_index`. 
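Note that `ids` appears in both the body and query lists for mtermvectors above; since declared body properties take precedence in the generated dispatch, it is serialized into the request body when passed at the top level. Continuing with the same assumed client and placeholder values:

// index fills the URL path, ids goes to the request body, term_statistics is a
// declared query parameter and goes to the query string.
const vectors = await client.mtermvectors({
  index: 'my-index',
  ids: ['1', '2'],
  term_statistics: true
})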
- * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-mtermvectors | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-mtermvectors | Elasticsearch API documentation} */ export default async function MtermvectorsApi (this: That, params?: T.MtermvectorsRequest, options?: TransportRequestOptionsWithOutMeta): Promise export default async function MtermvectorsApi (this: That, params?: T.MtermvectorsRequest, options?: TransportRequestOptionsWithMeta): Promise> export default async function MtermvectorsApi (this: That, params?: T.MtermvectorsRequest, options?: TransportRequestOptions): Promise export default async function MtermvectorsApi (this: That, params?: T.MtermvectorsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] - const acceptedBody: string[] = ['docs', 'ids'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = acceptedParams.mtermvectors + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -69,8 +90,14 @@ export default async function MtermvectorsApi (this: That, params?: T.Mtermvecto } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } diff --git a/src/api/api/nodes.ts b/src/api/api/nodes.ts index 1ce489ae0..064499823 100644 --- a/src/api/api/nodes.ts +++ b/src/api/api/nodes.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. 
and contributors + * SPDX-License-Identifier: Apache-2.0 */ /* eslint-disable import/export */ @@ -35,23 +21,116 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -interface That { transport: Transport } + +interface That { + transport: Transport + acceptedParams: Record +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] export default class Nodes { transport: Transport + acceptedParams: Record constructor (transport: Transport) { this.transport = transport + this.acceptedParams = { + 'nodes.clear_repositories_metering_archive': { + path: [ + 'node_id', + 'max_archive_version' + ], + body: [], + query: [] + }, + 'nodes.get_repositories_metering_info': { + path: [ + 'node_id' + ], + body: [], + query: [] + }, + 'nodes.hot_threads': { + path: [ + 'node_id' + ], + body: [], + query: [ + 'ignore_idle_threads', + 'interval', + 'snapshots', + 'threads', + 'timeout', + 'type', + 'sort' + ] + }, + 'nodes.info': { + path: [ + 'node_id', + 'metric' + ], + body: [], + query: [ + 'flat_settings', + 'timeout' + ] + }, + 'nodes.reload_secure_settings': { + path: [ + 'node_id' + ], + body: [ + 'secure_settings_password' + ], + query: [ + 'timeout' + ] + }, + 'nodes.stats': { + path: [ + 'node_id', + 'metric', + 'index_metric' + ], + body: [], + query: [ + 'completion_fields', + 'fielddata_fields', + 'fields', + 'groups', + 'include_segment_file_sizes', + 'level', + 'timeout', + 'types', + 'include_unloaded_segments' + ] + }, + 'nodes.usage': { + path: [ + 'node_id', + 'metric' + ], + body: [], + query: [ + 'timeout' + ] + } + } } /** * Clear the archived repositories metering. Clear the archived repositories metering information in the cluster. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-nodes-clear-repositories-metering-archive | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-nodes-clear-repositories-metering-archive | Elasticsearch API documentation} */ async clearRepositoriesMeteringArchive (this: That, params: T.NodesClearRepositoriesMeteringArchiveRequest, options?: TransportRequestOptionsWithOutMeta): Promise async clearRepositoriesMeteringArchive (this: That, params: T.NodesClearRepositoriesMeteringArchiveRequest, options?: TransportRequestOptionsWithMeta): Promise> async clearRepositoriesMeteringArchive (this: That, params: T.NodesClearRepositoriesMeteringArchiveRequest, options?: TransportRequestOptions): Promise async clearRepositoriesMeteringArchive (this: That, params: T.NodesClearRepositoriesMeteringArchiveRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['node_id', 'max_archive_version'] + const { + path: acceptedPath + } = this.acceptedParams['nodes.clear_repositories_metering_archive'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -88,13 +167,16 @@ export default class Nodes { /** * Get cluster repositories metering. Get repositories metering information for a cluster. This API exposes monotonically non-decreasing counters and it is expected that clients would durably store the information needed to compute aggregations over a period of time. Additionally, the information exposed by this API is volatile, meaning that it will not be present after node restarts. 
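The per-endpoint `acceptedParams` tables above drive how loose request keys are routed: keys declared under `query` (plus the common `error_trace`/`filter_path`/`human`/`pretty` params) go to the querystring, while declared body keys and anything unrecognized fall through to the request body. A simplified standalone sketch of that rule, not the generated code itself:

```ts
// Simplified illustration of the routing rule the generated methods share.
interface EndpointSpec { path: string[], body: string[], query: string[] }

const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty']

function splitParams (spec: EndpointSpec, params: Record<string, any>): {
  querystring: Record<string, any>
  body: Record<string, any>
} {
  const querystring: Record<string, any> = {}
  const body: Record<string, any> = {}
  for (const key of Object.keys(params)) {
    if (spec.path.includes(key) || key === 'body' || key === 'querystring') continue
    if (spec.body.includes(key)) {
      body[key] = params[key]
    } else if (spec.query.includes(key) || commonQueryParams.includes(key)) {
      querystring[key] = params[key]
    } else {
      // Unknown keys are treated as body properties rather than query params.
      body[key] = params[key]
    }
  }
  return { querystring, body }
}

// Example: 'timeout' is a declared query param for nodes.reload_secure_settings,
// while 'secure_settings_password' is a declared body param.
console.log(splitParams(
  { path: ['node_id'], body: ['secure_settings_password'], query: ['timeout'] },
  { node_id: 'node-1', timeout: '30s', secure_settings_password: 's3cret' }
))
```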
- * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-nodes-get-repositories-metering-info | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-nodes-get-repositories-metering-info | Elasticsearch API documentation} */ async getRepositoriesMeteringInfo (this: That, params: T.NodesGetRepositoriesMeteringInfoRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getRepositoriesMeteringInfo (this: That, params: T.NodesGetRepositoriesMeteringInfoRequest, options?: TransportRequestOptionsWithMeta): Promise> async getRepositoriesMeteringInfo (this: That, params: T.NodesGetRepositoriesMeteringInfoRequest, options?: TransportRequestOptions): Promise async getRepositoriesMeteringInfo (this: That, params: T.NodesGetRepositoriesMeteringInfoRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['node_id'] + const { + path: acceptedPath + } = this.acceptedParams['nodes.get_repositories_metering_info'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -130,13 +212,16 @@ export default class Nodes { /** * Get the hot threads for nodes. Get a breakdown of the hot threads on each selected node in the cluster. The output is plain text with a breakdown of the top hot threads for each node. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-nodes-hot-threads | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-nodes-hot-threads | Elasticsearch API documentation} */ async hotThreads (this: That, params?: T.NodesHotThreadsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async hotThreads (this: That, params?: T.NodesHotThreadsRequest, options?: TransportRequestOptionsWithMeta): Promise> async hotThreads (this: That, params?: T.NodesHotThreadsRequest, options?: TransportRequestOptions): Promise async hotThreads (this: That, params?: T.NodesHotThreadsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['node_id'] + const { + path: acceptedPath + } = this.acceptedParams['nodes.hot_threads'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -180,13 +265,16 @@ export default class Nodes { /** * Get node information. By default, the API returns all attributes and core settings for cluster nodes. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-nodes-info | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-nodes-info | Elasticsearch API documentation} */ async info (this: That, params?: T.NodesInfoRequest, options?: TransportRequestOptionsWithOutMeta): Promise async info (this: That, params?: T.NodesInfoRequest, options?: TransportRequestOptionsWithMeta): Promise> async info (this: That, params?: T.NodesInfoRequest, options?: TransportRequestOptions): Promise async info (this: That, params?: T.NodesInfoRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['node_id', 'metric'] + const { + path: acceptedPath + } = this.acceptedParams['nodes.info'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -237,14 +325,18 @@ export default class Nodes { /** * Reload the keystore on nodes in the cluster. Secure settings are stored in an on-disk keystore. Certain of these settings are reloadable. That is, you can change them on disk and reload them without restarting any nodes in the cluster. When you have updated reloadable secure settings in your keystore, you can use this API to reload those settings on each node. When the Elasticsearch keystore is password protected and not simply obfuscated, you must provide the password for the keystore when you reload the secure settings. Reloading the settings for the whole cluster assumes that the keystores for all nodes are protected with the same password; this method is allowed only when inter-node communications are encrypted. Alternatively, you can reload the secure settings on each node by locally accessing the API and passing the node-specific Elasticsearch keystore password. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-nodes-reload-secure-settings | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-nodes-reload-secure-settings | Elasticsearch API documentation} */ async reloadSecureSettings (this: That, params?: T.NodesReloadSecureSettingsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async reloadSecureSettings (this: That, params?: T.NodesReloadSecureSettingsRequest, options?: TransportRequestOptionsWithMeta): Promise> async reloadSecureSettings (this: That, params?: T.NodesReloadSecureSettingsRequest, options?: TransportRequestOptions): Promise async reloadSecureSettings (this: That, params?: T.NodesReloadSecureSettingsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['node_id'] - const acceptedBody: string[] = ['secure_settings_password'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['nodes.reload_secure_settings'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -267,8 +359,14 @@ export default class Nodes { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -292,13 +390,16 @@ export default class Nodes { /** * Get node statistics. Get statistics for nodes in a cluster. By default, all stats are returned. You can limit the returned information by using metrics. 
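A minimal usage sketch for the reload-secure-settings call above, assuming a password-protected keystore shared by all nodes; the node URL and password are placeholders:

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' }) // placeholder

async function reloadKeystore (): Promise<void> {
  // `secure_settings_password` is routed to the request body and `timeout`
  // to the querystring, per the accepted-params table above.
  const result = await client.nodes.reloadSecureSettings({
    secure_settings_password: 'keystore-password', // placeholder
    timeout: '30s'
  })
  console.log(result.nodes)
}

reloadKeystore().catch(console.error)
```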
- * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-nodes-stats | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-nodes-stats | Elasticsearch API documentation} */ async stats (this: That, params?: T.NodesStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async stats (this: That, params?: T.NodesStatsRequest, options?: TransportRequestOptionsWithMeta): Promise> async stats (this: That, params?: T.NodesStatsRequest, options?: TransportRequestOptions): Promise async stats (this: That, params?: T.NodesStatsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['node_id', 'metric', 'index_metric'] + const { + path: acceptedPath + } = this.acceptedParams['nodes.stats'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -356,13 +457,16 @@ export default class Nodes { /** * Get feature usage information. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-nodes-usage | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-nodes-usage | Elasticsearch API documentation} */ async usage (this: That, params?: T.NodesUsageRequest, options?: TransportRequestOptionsWithOutMeta): Promise async usage (this: That, params?: T.NodesUsageRequest, options?: TransportRequestOptionsWithMeta): Promise> async usage (this: That, params?: T.NodesUsageRequest, options?: TransportRequestOptions): Promise async usage (this: That, params?: T.NodesUsageRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['node_id', 'metric'] + const { + path: acceptedPath + } = this.acceptedParams['nodes.usage'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} diff --git a/src/api/api/open_point_in_time.ts b/src/api/api/open_point_in_time.ts index 4cd2a733e..3b547e4d3 100644 --- a/src/api/api/open_point_in_time.ts +++ b/src/api/api/open_point_in_time.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. 
and contributors + * SPDX-License-Identifier: Apache-2.0 */ /* eslint-disable import/export */ @@ -35,18 +21,47 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -interface That { transport: Transport } + +interface That { + transport: Transport +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] + +const acceptedParams: Record = { + open_point_in_time: { + path: [ + 'index' + ], + body: [ + 'index_filter' + ], + query: [ + 'keep_alive', + 'ignore_unavailable', + 'preference', + 'routing', + 'expand_wildcards', + 'allow_partial_search_results', + 'max_concurrent_shard_requests' + ] + } +} /** * Open a point in time. A search request by default runs against the most recent visible data of the target indices, which is called point in time. Elasticsearch pit (point in time) is a lightweight view into the state of the data as it existed when initiated. In some cases, it’s preferred to perform multiple search requests using the same point in time. For example, if refreshes happen between `search_after` requests, then the results of those requests might not be consistent as changes happening between searches are only visible to the more recent point in time. A point in time must be opened explicitly before being used in search requests. A subsequent search request with the `pit` parameter must not specify `index`, `routing`, or `preference` values as these parameters are copied from the point in time. Just like regular searches, you can use `from` and `size` to page through point in time search results, up to the first 10,000 hits. If you want to retrieve more hits, use PIT with `search_after`. IMPORTANT: The open point in time request and each subsequent search request can return different identifiers; always use the most recently received ID for the next search request. When a PIT that contains shard failures is used in a search request, the missing are always reported in the search response as a `NoShardAvailableActionException` exception. To get rid of these exceptions, a new PIT needs to be created so that shards missing from the previous PIT can be handled, assuming they become available in the meantime. **Keeping point in time alive** The `keep_alive` parameter, which is passed to a open point in time request and search request, extends the time to live of the corresponding point in time. The value does not need to be long enough to process all data — it just needs to be long enough for the next request. Normally, the background merge process optimizes the index by merging together smaller segments to create new, bigger segments. Once the smaller segments are no longer needed they are deleted. However, open point-in-times prevent the old segments from being deleted since they are still in use. TIP: Keeping older segments alive means that more disk space and file handles are needed. Ensure that you have configured your nodes to have ample free file handles. Additionally, if a segment contains deleted or updated documents then the point in time must keep track of whether each document in the segment was live at the time of the initial search request. Ensure that your nodes have sufficient heap space if you have many open point-in-times on an index that is subject to ongoing deletes or updates. Note that a point-in-time doesn't prevent its associated indices from being deleted. You can check how many point-in-times (that is, search contexts) are open with the nodes stats API. 
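A minimal sketch of the PIT lifecycle described above, assuming a configured client; the index name, sort field, and keep-alive values are placeholders:

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' }) // placeholder

async function searchWithPit (): Promise<void> {
  // Open the point in time; keep_alive only needs to cover the next request.
  const pit = await client.openPointInTime({ index: 'my-index', keep_alive: '1m' })

  // Search against the PIT. Note: no `index` here, it is carried by the PIT.
  const page = await client.search({
    size: 100,
    pit: { id: pit.id, keep_alive: '1m' },
    sort: [{ '@timestamp': 'asc' }] // placeholder sort field
  })

  // Always reuse the most recent PIT id returned by the previous request.
  console.log(page.hits.hits.length, page.pit_id)
}

searchWithPit().catch(console.error)
```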
- * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-open-point-in-time | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-open-point-in-time | Elasticsearch API documentation} */ export default async function OpenPointInTimeApi (this: That, params: T.OpenPointInTimeRequest, options?: TransportRequestOptionsWithOutMeta): Promise export default async function OpenPointInTimeApi (this: That, params: T.OpenPointInTimeRequest, options?: TransportRequestOptionsWithMeta): Promise> export default async function OpenPointInTimeApi (this: That, params: T.OpenPointInTimeRequest, options?: TransportRequestOptions): Promise export default async function OpenPointInTimeApi (this: That, params: T.OpenPointInTimeRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] - const acceptedBody: string[] = ['index_filter'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = acceptedParams.open_point_in_time + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -68,8 +83,14 @@ export default async function OpenPointInTimeApi (this: That, params: T.OpenPoin } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } diff --git a/src/api/api/ping.ts b/src/api/api/ping.ts index 908709afd..ea9109110 100644 --- a/src/api/api/ping.ts +++ b/src/api/api/ping.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 */ /* eslint-disable import/export */ @@ -35,17 +21,31 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -interface That { transport: Transport } + +interface That { + transport: Transport +} + +const acceptedParams: Record = { + ping: { + path: [], + body: [], + query: [] + } +} /** * Ping the cluster. Get information about whether the cluster is running. 
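A minimal liveness-check sketch using the ping call above; the node URL is a placeholder, and connection-level failures are caught rather than surfaced as a rejected promise:

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' }) // placeholder

async function isClusterUp (): Promise<boolean> {
  try {
    // Resolves to a boolean body when the cluster answers.
    return await client.ping()
  } catch (err) {
    // Transport errors (unreachable node, auth failure) reject instead.
    return false
  }
}

isClusterUp().then(up => console.log('cluster reachable:', up))
```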
- * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-cluster | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/group/endpoint-cluster | Elasticsearch API documentation} */ export default async function PingApi (this: That, params?: T.PingRequest, options?: TransportRequestOptionsWithOutMeta): Promise export default async function PingApi (this: That, params?: T.PingRequest, options?: TransportRequestOptionsWithMeta): Promise> export default async function PingApi (this: That, params?: T.PingRequest, options?: TransportRequestOptions): Promise export default async function PingApi (this: That, params?: T.PingRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] + const { + path: acceptedPath + } = acceptedParams.ping + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} diff --git a/src/api/api/profiling.ts b/src/api/api/profiling.ts index 75f2d46cc..7c3be5360 100644 --- a/src/api/api/profiling.ts +++ b/src/api/api/profiling.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 */ /* eslint-disable import/export */ @@ -35,23 +21,53 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -interface That { transport: Transport } + +interface That { + transport: Transport + acceptedParams: Record +} export default class Profiling { transport: Transport + acceptedParams: Record constructor (transport: Transport) { this.transport = transport + this.acceptedParams = { + 'profiling.flamegraph': { + path: [], + body: [], + query: [] + }, + 'profiling.stacktraces': { + path: [], + body: [], + query: [] + }, + 'profiling.status': { + path: [], + body: [], + query: [] + }, + 'profiling.topn_functions': { + path: [], + body: [], + query: [] + } + } } /** * Extracts a UI-optimized structure to render flamegraphs from Universal Profiling. 
- * @see {@link https://www.elastic.co/guide/en/observability/master/universal-profiling.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/guide/en/observability/9.0/universal-profiling.html | Elasticsearch API documentation} */ async flamegraph (this: That, params?: T.TODO, options?: TransportRequestOptionsWithOutMeta): Promise async flamegraph (this: That, params?: T.TODO, options?: TransportRequestOptionsWithMeta): Promise> async flamegraph (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise async flamegraph (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] + const { + path: acceptedPath + } = this.acceptedParams['profiling.flamegraph'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -84,13 +100,16 @@ export default class Profiling { /** * Extracts raw stacktrace information from Universal Profiling. - * @see {@link https://www.elastic.co/guide/en/observability/master/universal-profiling.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/guide/en/observability/9.0/universal-profiling.html | Elasticsearch API documentation} */ async stacktraces (this: That, params?: T.TODO, options?: TransportRequestOptionsWithOutMeta): Promise async stacktraces (this: That, params?: T.TODO, options?: TransportRequestOptionsWithMeta): Promise> async stacktraces (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise async stacktraces (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] + const { + path: acceptedPath + } = this.acceptedParams['profiling.stacktraces'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -123,13 +142,16 @@ export default class Profiling { /** * Returns basic information about the status of Universal Profiling. - * @see {@link https://www.elastic.co/guide/en/observability/master/universal-profiling.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/guide/en/observability/9.0/universal-profiling.html | Elasticsearch API documentation} */ async status (this: That, params?: T.TODO, options?: TransportRequestOptionsWithOutMeta): Promise async status (this: That, params?: T.TODO, options?: TransportRequestOptionsWithMeta): Promise> async status (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise async status (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] + const { + path: acceptedPath + } = this.acceptedParams['profiling.status'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -162,13 +184,16 @@ export default class Profiling { /** * Extracts a list of topN functions from Universal Profiling. 
- * @see {@link https://www.elastic.co/guide/en/observability/master/universal-profiling.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/guide/en/observability/9.0/universal-profiling.html | Elasticsearch API documentation} */ async topnFunctions (this: That, params?: T.TODO, options?: TransportRequestOptionsWithOutMeta): Promise async topnFunctions (this: That, params?: T.TODO, options?: TransportRequestOptionsWithMeta): Promise> async topnFunctions (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise async topnFunctions (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] + const { + path: acceptedPath + } = this.acceptedParams['profiling.topn_functions'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} diff --git a/src/api/api/put_script.ts b/src/api/api/put_script.ts index d3350ca5b..8cec8229e 100644 --- a/src/api/api/put_script.ts +++ b/src/api/api/put_script.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 */ /* eslint-disable import/export */ @@ -35,18 +21,44 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -interface That { transport: Transport } + +interface That { + transport: Transport +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] + +const acceptedParams: Record = { + put_script: { + path: [ + 'id', + 'context' + ], + body: [ + 'script' + ], + query: [ + 'context', + 'master_timeout', + 'timeout' + ] + } +} /** * Create or update a script or search template. Creates or updates a stored script or search template. 
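A minimal sketch of storing a script with the put-script call above; the script id and Painless source are placeholder values:

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' }) // placeholder

async function storeScript (): Promise<void> {
  // `id` is a path parameter; `script` is routed to the request body.
  const resp = await client.putScript({
    id: 'calculate-discount', // placeholder id
    script: {
      lang: 'painless',
      source: "doc['price'].value * params.discount" // placeholder source
    }
  })
  console.log(resp.acknowledged)
}

storeScript().catch(console.error)
```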
- * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-put-script | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-put-script | Elasticsearch API documentation} */ export default async function PutScriptApi (this: That, params: T.PutScriptRequest, options?: TransportRequestOptionsWithOutMeta): Promise export default async function PutScriptApi (this: That, params: T.PutScriptRequest, options?: TransportRequestOptionsWithMeta): Promise> export default async function PutScriptApi (this: That, params: T.PutScriptRequest, options?: TransportRequestOptions): Promise export default async function PutScriptApi (this: That, params: T.PutScriptRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id', 'context'] - const acceptedBody: string[] = ['script'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = acceptedParams.put_script + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -68,8 +80,14 @@ export default async function PutScriptApi (this: That, params: T.PutScriptReque } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } diff --git a/src/api/api/query_rules.ts b/src/api/api/query_rules.ts index bb7a964ee..2e5c17cdf 100644 --- a/src/api/api/query_rules.ts +++ b/src/api/api/query_rules.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. 
and contributors + * SPDX-License-Identifier: Apache-2.0 */ /* eslint-disable import/export */ @@ -35,23 +21,104 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -interface That { transport: Transport } + +interface That { + transport: Transport + acceptedParams: Record +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] export default class QueryRules { transport: Transport + acceptedParams: Record constructor (transport: Transport) { this.transport = transport + this.acceptedParams = { + 'query_rules.delete_rule': { + path: [ + 'ruleset_id', + 'rule_id' + ], + body: [], + query: [] + }, + 'query_rules.delete_ruleset': { + path: [ + 'ruleset_id' + ], + body: [], + query: [] + }, + 'query_rules.get_rule': { + path: [ + 'ruleset_id', + 'rule_id' + ], + body: [], + query: [] + }, + 'query_rules.get_ruleset': { + path: [ + 'ruleset_id' + ], + body: [], + query: [] + }, + 'query_rules.list_rulesets': { + path: [], + body: [], + query: [ + 'from', + 'size' + ] + }, + 'query_rules.put_rule': { + path: [ + 'ruleset_id', + 'rule_id' + ], + body: [ + 'type', + 'criteria', + 'actions', + 'priority' + ], + query: [] + }, + 'query_rules.put_ruleset': { + path: [ + 'ruleset_id' + ], + body: [ + 'rules' + ], + query: [] + }, + 'query_rules.test': { + path: [ + 'ruleset_id' + ], + body: [ + 'match_criteria' + ], + query: [] + } + } } /** * Delete a query rule. Delete a query rule within a query ruleset. This is a destructive action that is only recoverable by re-adding the same rule with the create or update query rule API. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-query-rules-delete-rule | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-query-rules-delete-rule | Elasticsearch API documentation} */ async deleteRule (this: That, params: T.QueryRulesDeleteRuleRequest, options?: TransportRequestOptionsWithOutMeta): Promise async deleteRule (this: That, params: T.QueryRulesDeleteRuleRequest, options?: TransportRequestOptionsWithMeta): Promise> async deleteRule (this: That, params: T.QueryRulesDeleteRuleRequest, options?: TransportRequestOptions): Promise async deleteRule (this: That, params: T.QueryRulesDeleteRuleRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['ruleset_id', 'rule_id'] + const { + path: acceptedPath + } = this.acceptedParams['query_rules.delete_rule'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -88,13 +155,16 @@ export default class QueryRules { /** * Delete a query ruleset. Remove a query ruleset and its associated data. This is a destructive action that is not recoverable. 
- * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-query-rules-delete-ruleset | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-query-rules-delete-ruleset | Elasticsearch API documentation} */ async deleteRuleset (this: That, params: T.QueryRulesDeleteRulesetRequest, options?: TransportRequestOptionsWithOutMeta): Promise async deleteRuleset (this: That, params: T.QueryRulesDeleteRulesetRequest, options?: TransportRequestOptionsWithMeta): Promise> async deleteRuleset (this: That, params: T.QueryRulesDeleteRulesetRequest, options?: TransportRequestOptions): Promise async deleteRuleset (this: That, params: T.QueryRulesDeleteRulesetRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['ruleset_id'] + const { + path: acceptedPath + } = this.acceptedParams['query_rules.delete_ruleset'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -130,13 +200,16 @@ export default class QueryRules { /** * Get a query rule. Get details about a query rule within a query ruleset. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-query-rules-get-rule | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-query-rules-get-rule | Elasticsearch API documentation} */ async getRule (this: That, params: T.QueryRulesGetRuleRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getRule (this: That, params: T.QueryRulesGetRuleRequest, options?: TransportRequestOptionsWithMeta): Promise> async getRule (this: That, params: T.QueryRulesGetRuleRequest, options?: TransportRequestOptions): Promise async getRule (this: That, params: T.QueryRulesGetRuleRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['ruleset_id', 'rule_id'] + const { + path: acceptedPath + } = this.acceptedParams['query_rules.get_rule'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -173,13 +246,16 @@ export default class QueryRules { /** * Get a query ruleset. Get details about a query ruleset. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-query-rules-get-ruleset | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-query-rules-get-ruleset | Elasticsearch API documentation} */ async getRuleset (this: That, params: T.QueryRulesGetRulesetRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getRuleset (this: That, params: T.QueryRulesGetRulesetRequest, options?: TransportRequestOptionsWithMeta): Promise> async getRuleset (this: That, params: T.QueryRulesGetRulesetRequest, options?: TransportRequestOptions): Promise async getRuleset (this: That, params: T.QueryRulesGetRulesetRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['ruleset_id'] + const { + path: acceptedPath + } = this.acceptedParams['query_rules.get_ruleset'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -215,13 +291,16 @@ export default class QueryRules { /** * Get all query rulesets. Get summarized information about the query rulesets. 
- * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-query-rules-list-rulesets | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-query-rules-list-rulesets | Elasticsearch API documentation} */ async listRulesets (this: That, params?: T.QueryRulesListRulesetsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async listRulesets (this: That, params?: T.QueryRulesListRulesetsRequest, options?: TransportRequestOptionsWithMeta): Promise> async listRulesets (this: That, params?: T.QueryRulesListRulesetsRequest, options?: TransportRequestOptions): Promise async listRulesets (this: That, params?: T.QueryRulesListRulesetsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] + const { + path: acceptedPath + } = this.acceptedParams['query_rules.list_rulesets'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -255,14 +334,18 @@ export default class QueryRules { /** * Create or update a query rule. Create or update a query rule within a query ruleset. IMPORTANT: Due to limitations within pinned queries, you can only pin documents using ids or docs, but cannot use both in single rule. It is advised to use one or the other in query rulesets, to avoid errors. Additionally, pinned queries have a maximum limit of 100 pinned hits. If multiple matching rules pin more than 100 documents, only the first 100 documents are pinned in the order they are specified in the ruleset. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-query-rules-put-rule | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-query-rules-put-rule | Elasticsearch API documentation} */ async putRule (this: That, params: T.QueryRulesPutRuleRequest, options?: TransportRequestOptionsWithOutMeta): Promise async putRule (this: That, params: T.QueryRulesPutRuleRequest, options?: TransportRequestOptionsWithMeta): Promise> async putRule (this: That, params: T.QueryRulesPutRuleRequest, options?: TransportRequestOptions): Promise async putRule (this: That, params: T.QueryRulesPutRuleRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['ruleset_id', 'rule_id'] - const acceptedBody: string[] = ['type', 'criteria', 'actions', 'priority'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['query_rules.put_rule'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -284,8 +367,14 @@ export default class QueryRules { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -303,14 +392,18 @@ export default class QueryRules { /** * Create or update a query ruleset. There is a limit of 100 rules per ruleset. This limit can be increased by using the `xpack.applications.rules.max_rules_per_ruleset` cluster setting. IMPORTANT: Due to limitations within pinned queries, you can only select documents using `ids` or `docs`, but cannot use both in single rule. 
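A minimal sketch of creating and testing a ruleset with the `putRuleset` and `test` calls above, staying within the pinned-document constraints just described; the ruleset id, criteria, and document ids are placeholders:

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' }) // placeholder

async function manageRuleset (): Promise<void> {
  // One pinned rule; use either `ids` or `docs` in a rule, never both,
  // and remember at most 100 documents end up pinned.
  await client.queryRules.putRuleset({
    ruleset_id: 'promo-rules', // placeholder
    rules: [
      {
        rule_id: 'pin-docs',
        type: 'pinned',
        criteria: [{ type: 'exact', metadata: 'user_query', values: ['pugs'] }],
        actions: { ids: ['doc-1', 'doc-2'] } // placeholder document ids
      }
    ]
  })

  // Evaluate which rules would match a given set of criteria values.
  const matches = await client.queryRules.test({
    ruleset_id: 'promo-rules',
    match_criteria: { user_query: 'pugs' }
  })
  console.log(matches.total_matched_rules)
}

manageRuleset().catch(console.error)
```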
It is advised to use one or the other in query rulesets, to avoid errors. Additionally, pinned queries have a maximum limit of 100 pinned hits. If multiple matching rules pin more than 100 documents, only the first 100 documents are pinned in the order they are specified in the ruleset. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-query-rules-put-ruleset | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-query-rules-put-ruleset | Elasticsearch API documentation} */ async putRuleset (this: That, params: T.QueryRulesPutRulesetRequest, options?: TransportRequestOptionsWithOutMeta): Promise async putRuleset (this: That, params: T.QueryRulesPutRulesetRequest, options?: TransportRequestOptionsWithMeta): Promise> async putRuleset (this: That, params: T.QueryRulesPutRulesetRequest, options?: TransportRequestOptions): Promise async putRuleset (this: That, params: T.QueryRulesPutRulesetRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['ruleset_id'] - const acceptedBody: string[] = ['rules'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['query_rules.put_ruleset'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -332,8 +425,14 @@ export default class QueryRules { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -350,14 +449,18 @@ export default class QueryRules { /** * Test a query ruleset. Evaluate match criteria against a query ruleset to identify the rules that would match that criteria. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-query-rules-test | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-query-rules-test | Elasticsearch API documentation} */ async test (this: That, params: T.QueryRulesTestRequest, options?: TransportRequestOptionsWithOutMeta): Promise async test (this: That, params: T.QueryRulesTestRequest, options?: TransportRequestOptionsWithMeta): Promise> async test (this: That, params: T.QueryRulesTestRequest, options?: TransportRequestOptions): Promise async test (this: That, params: T.QueryRulesTestRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['ruleset_id'] - const acceptedBody: string[] = ['match_criteria'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['query_rules.test'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -379,8 +482,14 @@ export default class QueryRules { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? 
{} + // @ts-expect-error + body[key] = params[key] + } } } diff --git a/src/api/api/rank_eval.ts b/src/api/api/rank_eval.ts index bd3af65e5..795b5e3c2 100644 --- a/src/api/api/rank_eval.ts +++ b/src/api/api/rank_eval.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 */ /* eslint-disable import/export */ @@ -35,18 +21,45 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -interface That { transport: Transport } + +interface That { + transport: Transport +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] + +const acceptedParams: Record = { + rank_eval: { + path: [ + 'index' + ], + body: [ + 'requests', + 'metric' + ], + query: [ + 'allow_no_indices', + 'expand_wildcards', + 'ignore_unavailable', + 'search_type' + ] + } +} /** * Evaluate ranked search results. Evaluate the quality of ranked search results over a set of typical search queries. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-rank-eval | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-rank-eval | Elasticsearch API documentation} */ export default async function RankEvalApi (this: That, params: T.RankEvalRequest, options?: TransportRequestOptionsWithOutMeta): Promise export default async function RankEvalApi (this: That, params: T.RankEvalRequest, options?: TransportRequestOptionsWithMeta): Promise> export default async function RankEvalApi (this: That, params: T.RankEvalRequest, options?: TransportRequestOptions): Promise export default async function RankEvalApi (this: That, params: T.RankEvalRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] - const acceptedBody: string[] = ['requests', 'metric'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = acceptedParams.rank_eval + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -68,8 +81,14 @@ export default async function RankEvalApi (this: That, params: T.RankEvalRequest } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? 
{} + // @ts-expect-error + body[key] = params[key] + } } } diff --git a/src/api/api/reindex.ts b/src/api/api/reindex.ts index 5c83f147b..ccda1c795 100644 --- a/src/api/api/reindex.ts +++ b/src/api/api/reindex.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 */ /* eslint-disable import/export */ @@ -35,18 +21,51 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -interface That { transport: Transport } + +interface That { + transport: Transport +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] + +const acceptedParams: Record = { + reindex: { + path: [], + body: [ + 'conflicts', + 'dest', + 'max_docs', + 'script', + 'size', + 'source' + ], + query: [ + 'refresh', + 'requests_per_second', + 'scroll', + 'slices', + 'timeout', + 'wait_for_active_shards', + 'wait_for_completion', + 'require_alias' + ] + } +} /** * Reindex documents. Copy documents from a source to a destination. You can copy all documents to the destination index or reindex a subset of the documents. The source can be any existing index, alias, or data stream. The destination must differ from the source. For example, you cannot reindex a data stream into itself. IMPORTANT: Reindex requires `_source` to be enabled for all documents in the source. The destination should be configured as wanted before calling the reindex API. Reindex does not copy the settings from the source or its associated template. Mappings, shard counts, and replicas, for example, must be configured ahead of time. If the Elasticsearch security features are enabled, you must have the following security privileges: * The `read` index privilege for the source data stream, index, or alias. * The `write` index privilege for the destination data stream, index, or index alias. * To automatically create a data stream or index with a reindex API request, you must have the `auto_configure`, `create_index`, or `manage` index privilege for the destination data stream, index, or alias. * If reindexing from a remote cluster, the `source.remote.user` must have the `monitor` cluster privilege and the `read` index privilege for the source data stream, index, or alias. If reindexing from a remote cluster, you must explicitly allow the remote host in the `reindex.remote.whitelist` setting. Automatic data stream creation requires a matching index template with data stream enabled. The `dest` element can be configured like the index API to control optimistic concurrency control. Omitting `version_type` or setting it to `internal` causes Elasticsearch to blindly dump documents into the destination, overwriting any that happen to have the same ID. 
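A minimal sketch of the rank evaluation call above, using the `requests` and `metric` body keys from its accepted-params table; the index, query, and relevance ratings are placeholder judgments:

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' }) // placeholder

async function evaluateRanking (): Promise<void> {
  const evaluation = await client.rankEval({
    index: 'my-index', // placeholder
    requests: [
      {
        id: 'query-1',
        request: { query: { match: { title: 'elasticsearch' } } },
        ratings: [
          { _index: 'my-index', _id: 'doc-1', rating: 3 }, // relevant
          { _index: 'my-index', _id: 'doc-2', rating: 0 }  // not relevant
        ]
      }
    ],
    metric: { precision: { k: 10, relevant_rating_threshold: 1 } }
  })
  console.log(evaluation.metric_score)
}

evaluateRanking().catch(console.error)
```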
Setting `version_type` to `external` causes Elasticsearch to preserve the `version` from the source, create any documents that are missing, and update any documents that have an older version in the destination than they do in the source. Setting `op_type` to `create` causes the reindex API to create only missing documents in the destination. All existing documents will cause a version conflict. IMPORTANT: Because data streams are append-only, any reindex request to a destination data stream must have an `op_type` of `create`. A reindex can only add new documents to a destination data stream. It cannot update existing documents in a destination data stream. By default, version conflicts abort the reindex process. To continue reindexing if there are conflicts, set the `conflicts` request body property to `proceed`. In this case, the response includes a count of the version conflicts that were encountered. Note that the handling of other error types is unaffected by the `conflicts` property. Additionally, if you opt to count version conflicts, the operation could attempt to reindex more documents from the source than `max_docs` until it has successfully indexed `max_docs` documents into the target or it has gone through every document in the source query. NOTE: The reindex API makes no effort to handle ID collisions. The last document written will "win" but the order isn't usually predictable so it is not a good idea to rely on this behavior. Instead, make sure that IDs are unique by using a script. **Running reindex asynchronously** If the request contains `wait_for_completion=false`, Elasticsearch performs some preflight checks, launches the request, and returns a task you can use to cancel or get the status of the task. Elasticsearch creates a record of this task as a document at `_tasks/`. **Reindex from multiple sources** If you have many sources to reindex it is generally better to reindex them one at a time rather than using a glob pattern to pick up multiple sources. That way you can resume the process if there are any errors by removing the partially completed source and starting over. It also makes parallelizing the process fairly simple: split the list of sources to reindex and run each list in parallel. For example, you can use a bash script like this: ``` for index in i1 i2 i3 i4 i5; do curl -HContent-Type:application/json -XPOST localhost:9200/_reindex?pretty -d'{ "source": { "index": "'$index'" }, "dest": { "index": "'$index'-reindexed" } }' done ``` **Throttling** Set `requests_per_second` to any positive decimal number (`1.4`, `6`, `1000`, for example) to throttle the rate at which reindex issues batches of index operations. Requests are throttled by padding each batch with a wait time. To turn off throttling, set `requests_per_second` to `-1`. The throttling is done by waiting between batches so that the scroll that reindex uses internally can be given a timeout that takes into account the padding. The padding time is the difference between the batch size divided by the `requests_per_second` and the time spent writing. By default the batch size is `1000`, so if `requests_per_second` is set to `500`: ``` target_time = 1000 / 500 per second = 2 seconds wait_time = target_time - write_time = 2 seconds - .5 seconds = 1.5 seconds ``` Since the batch is issued as a single bulk request, large batch sizes cause Elasticsearch to create many requests and then wait for a while before starting the next set. This is "bursty" instead of "smooth". 
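A minimal sketch of a throttled, asynchronous reindex using the query parameters listed above; the index names, rate, and document cap are placeholders:

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' }) // placeholder

async function throttledReindex (): Promise<void> {
  const resp = await client.reindex({
    source: { index: 'old-index' },   // placeholder
    dest: { index: 'new-index' },     // placeholder
    conflicts: 'proceed',             // count version conflicts instead of aborting
    max_docs: 100_000,                // placeholder cap
    requests_per_second: 500,         // throttle; -1 disables throttling
    wait_for_completion: false        // run as a task instead of blocking
  })
  // With wait_for_completion=false the response carries a task id that can be
  // polled or cancelled through the tasks API.
  console.log(resp.task)
}

throttledReindex().catch(console.error)
```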
**Slicing** Reindex supports sliced scroll to parallelize the reindexing process. This parallelization can improve efficiency and provide a convenient way to break the request down into smaller parts. NOTE: Reindexing from remote clusters does not support manual or automatic slicing. You can slice a reindex request manually by providing a slice ID and total number of slices to each request. You can also let reindex automatically parallelize by using sliced scroll to slice on `_id`. The `slices` parameter specifies the number of slices to use. Adding `slices` to the reindex request just automates the manual process, creating sub-requests which means it has some quirks: * You can see these requests in the tasks API. These sub-requests are "child" tasks of the task for the request with slices. * Fetching the status of the task for the request with `slices` only contains the status of completed slices. * These sub-requests are individually addressable for things like cancellation and rethrottling. * Rethrottling the request with `slices` will rethrottle the unfinished sub-request proportionally. * Canceling the request with `slices` will cancel each sub-request. * Due to the nature of `slices`, each sub-request won't get a perfectly even portion of the documents. All documents will be addressed, but some slices may be larger than others. Expect larger slices to have a more even distribution. * Parameters like `requests_per_second` and `max_docs` on a request with `slices` are distributed proportionally to each sub-request. Combine that with the previous point about distribution being uneven and you should conclude that using `max_docs` with `slices` might not result in exactly `max_docs` documents being reindexed. * Each sub-request gets a slightly different snapshot of the source, though these are all taken at approximately the same time. If slicing automatically, setting `slices` to `auto` will choose a reasonable number for most indices. If slicing manually or otherwise tuning automatic slicing, use the following guidelines. Query performance is most efficient when the number of slices is equal to the number of shards in the index. If that number is large (for example, `500`), choose a lower number as too many slices will hurt performance. Setting slices higher than the number of shards generally does not improve efficiency and adds overhead. Indexing performance scales linearly across available resources with the number of slices. Whether query or indexing performance dominates the runtime depends on the documents being reindexed and cluster resources. **Modify documents during reindexing** Like `_update_by_query`, reindex operations support a script that modifies the document. Unlike `_update_by_query`, the script is allowed to modify the document's metadata. Just as in `_update_by_query`, you can set `ctx.op` to change the operation that is run on the destination. For example, set `ctx.op` to `noop` if your script decides that the document doesn’t have to be indexed in the destination. This "no operation" will be reported in the `noop` counter in the response body. Set `ctx.op` to `delete` if your script decides that the document must be deleted from the destination. The deletion will be reported in the `deleted` counter in the response body. Setting `ctx.op` to anything else will return an error, as will setting any other field in `ctx`. Think of the possibilities! 
Just be careful; you are able to change: * `_id` * `_index` * `_version` * `_routing` Setting `_version` to `null` or clearing it from the `ctx` map is just like not sending the version in an indexing request. It will cause the document to be overwritten in the destination regardless of the version on the target or the version type you use in the reindex API. **Reindex from remote** Reindex supports reindexing from a remote Elasticsearch cluster. The `host` parameter must contain a scheme, host, port, and optional path. The `username` and `password` parameters are optional and when they are present the reindex operation will connect to the remote Elasticsearch node using basic authentication. Be sure to use HTTPS when using basic authentication or the password will be sent in plain text. There are a range of settings available to configure the behavior of the HTTPS connection. When using Elastic Cloud, it is also possible to authenticate against the remote cluster through the use of a valid API key. Remote hosts must be explicitly allowed with the `reindex.remote.whitelist` setting. It can be set to a comma delimited list of allowed remote host and port combinations. Scheme is ignored; only the host and port are used. For example: ``` reindex.remote.whitelist: ["otherhost:9200", "another:9200", "127.0.10.*:9200", "localhost:*"] ``` The list of allowed hosts must be configured on any nodes that will coordinate the reindex. This feature should work with remote clusters of any version of Elasticsearch. This should enable you to upgrade from any version of Elasticsearch to the current version by reindexing from a cluster of the old version. WARNING: Elasticsearch does not support forward compatibility across major versions. For example, you cannot reindex from a 7.x cluster into a 6.x cluster. To enable queries sent to older versions of Elasticsearch, the `query` parameter is sent directly to the remote host without validation or modification. NOTE: Reindexing from remote clusters does not support manual or automatic slicing. Reindexing from a remote server uses an on-heap buffer that defaults to a maximum size of 100mb. If the remote index includes very large documents you'll need to use a smaller batch size. It is also possible to set the socket read timeout on the remote connection with the `socket_timeout` field and the connection timeout with the `connect_timeout` field. Both default to 30 seconds. **Configuring SSL parameters** Reindex from remote supports configurable SSL settings. These must be specified in the `elasticsearch.yml` file, with the exception of the secure settings, which you add in the Elasticsearch keystore. It is not possible to configure SSL in the body of the reindex request. 
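Putting the script-modification and remote-source options together, a hedged TypeScript sketch; the remote host, credentials, and index names are placeholders, and the remote host is assumed to already be listed in `reindex.remote.whitelist` on the coordinating nodes:

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'https://localhost:9200/', auth: { apiKey: 'REDACTED' } })

// Reindex from a remote cluster, dropping documents the script marks as noop.
// Host, credentials, and index names below are placeholders.
await client.reindex({
  source: {
    remote: {
      host: 'https://otherhost:9200/',
      username: 'elastic',
      password: 'REDACTED',
      socket_timeout: '1m',
      connect_timeout: '10s'
    },
    index: 'source-index',
    size: 100 // smaller batches help when the remote documents are large
  },
  dest: { index: 'dest-index' },
  script: {
    lang: 'painless',
    // Skip documents without a timestamp instead of copying them.
    source: "if (ctx._source.timestamp == null) { ctx.op = 'noop' }"
  }
})
```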
- * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-reindex | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-reindex | Elasticsearch API documentation} */ export default async function ReindexApi (this: That, params: T.ReindexRequest, options?: TransportRequestOptionsWithOutMeta): Promise export default async function ReindexApi (this: That, params: T.ReindexRequest, options?: TransportRequestOptionsWithMeta): Promise> export default async function ReindexApi (this: That, params: T.ReindexRequest, options?: TransportRequestOptions): Promise export default async function ReindexApi (this: That, params: T.ReindexRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const acceptedBody: string[] = ['conflicts', 'dest', 'max_docs', 'script', 'size', 'source'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = acceptedParams.reindex + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -68,8 +87,14 @@ export default async function ReindexApi (this: That, params: T.ReindexRequest, } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } diff --git a/src/api/api/reindex_rethrottle.ts b/src/api/api/reindex_rethrottle.ts index d32f80c01..cf966a44f 100644 --- a/src/api/api/reindex_rethrottle.ts +++ b/src/api/api/reindex_rethrottle.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 */ /* eslint-disable import/export */ @@ -35,17 +21,35 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -interface That { transport: Transport } + +interface That { + transport: Transport +} + +const acceptedParams: Record = { + reindex_rethrottle: { + path: [ + 'task_id' + ], + body: [], + query: [ + 'requests_per_second' + ] + } +} /** * Throttle a reindex operation. Change the number of requests per second for a particular reindex operation. For example: ``` POST _reindex/r1A2WoRbTwKZ516z6NEs5A:36619/_rethrottle?requests_per_second=-1 ``` Rethrottling that speeds up the query takes effect immediately. Rethrottling that slows down the query will take effect after completing the current batch. This behavior prevents scroll timeouts. 
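A corresponding client call for the rethrottle endpoint; the task id is the placeholder value from the example above:

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'https://localhost:9200/', auth: { apiKey: 'REDACTED' } })

// Task id as returned by a reindex started with wait_for_completion=false (placeholder).
const taskId = 'r1A2WoRbTwKZ516z6NEs5A:36619'

// Disable throttling for the running reindex. Speeding up takes effect
// immediately; slowing down waits for the current batch to finish.
await client.reindexRethrottle({ task_id: taskId, requests_per_second: -1 })
```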
- * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-reindex | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-reindex | Elasticsearch API documentation} */ export default async function ReindexRethrottleApi (this: That, params: T.ReindexRethrottleRequest, options?: TransportRequestOptionsWithOutMeta): Promise export default async function ReindexRethrottleApi (this: That, params: T.ReindexRethrottleRequest, options?: TransportRequestOptionsWithMeta): Promise> export default async function ReindexRethrottleApi (this: That, params: T.ReindexRethrottleRequest, options?: TransportRequestOptions): Promise export default async function ReindexRethrottleApi (this: That, params: T.ReindexRethrottleRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['task_id'] + const { + path: acceptedPath + } = acceptedParams.reindex_rethrottle + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} diff --git a/src/api/api/render_search_template.ts b/src/api/api/render_search_template.ts index 57b5377c6..fcc1fbd62 100644 --- a/src/api/api/render_search_template.ts +++ b/src/api/api/render_search_template.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 */ /* eslint-disable import/export */ @@ -35,18 +21,40 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -interface That { transport: Transport } + +interface That { + transport: Transport +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] + +const acceptedParams: Record = { + render_search_template: { + path: [], + body: [ + 'id', + 'file', + 'params', + 'source' + ], + query: [] + } +} /** * Render a search template. Render a search template as a search request body. 
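As a rough usage sketch, a stored template can be rendered through the client without executing it; the template id and parameters are placeholders:

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'https://localhost:9200/', auth: { apiKey: 'REDACTED' } })

// Render the stored search template "my-template" (placeholder id) with some parameters.
const rendered = await client.renderSearchTemplate({
  id: 'my-template',
  params: { query_string: 'hello world', from: 0, size: 10 }
})

// template_output is the search request body the template would produce.
console.log(rendered.template_output)
```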
- * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-render-search-template | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-render-search-template | Elasticsearch API documentation} */ export default async function RenderSearchTemplateApi (this: That, params?: T.RenderSearchTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise export default async function RenderSearchTemplateApi (this: That, params?: T.RenderSearchTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise> export default async function RenderSearchTemplateApi (this: That, params?: T.RenderSearchTemplateRequest, options?: TransportRequestOptions): Promise export default async function RenderSearchTemplateApi (this: That, params?: T.RenderSearchTemplateRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const acceptedBody: string[] = ['id', 'file', 'params', 'source'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = acceptedParams.render_search_template + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -69,8 +77,14 @@ export default async function RenderSearchTemplateApi (this: That, params?: T.Re } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } diff --git a/src/api/api/rollup.ts b/src/api/api/rollup.ts index b45043728..f130e2aa7 100644 --- a/src/api/api/rollup.ts +++ b/src/api/api/rollup.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. 
and contributors + * SPDX-License-Identifier: Apache-2.0 */ /* eslint-disable import/export */ @@ -35,23 +21,111 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -interface That { transport: Transport } + +interface That { + transport: Transport + acceptedParams: Record +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] export default class Rollup { transport: Transport + acceptedParams: Record constructor (transport: Transport) { this.transport = transport + this.acceptedParams = { + 'rollup.delete_job': { + path: [ + 'id' + ], + body: [], + query: [] + }, + 'rollup.get_jobs': { + path: [ + 'id' + ], + body: [], + query: [] + }, + 'rollup.get_rollup_caps': { + path: [ + 'id' + ], + body: [], + query: [] + }, + 'rollup.get_rollup_index_caps': { + path: [ + 'index' + ], + body: [], + query: [] + }, + 'rollup.put_job': { + path: [ + 'id' + ], + body: [ + 'cron', + 'groups', + 'index_pattern', + 'metrics', + 'page_size', + 'rollup_index', + 'timeout', + 'headers' + ], + query: [] + }, + 'rollup.rollup_search': { + path: [ + 'index' + ], + body: [ + 'aggregations', + 'aggs', + 'query', + 'size' + ], + query: [ + 'rest_total_hits_as_int', + 'typed_keys' + ] + }, + 'rollup.start_job': { + path: [ + 'id' + ], + body: [], + query: [] + }, + 'rollup.stop_job': { + path: [ + 'id' + ], + body: [], + query: [ + 'timeout', + 'wait_for_completion' + ] + } + } } /** * Delete a rollup job. A job must be stopped before it can be deleted. If you attempt to delete a started job, an error occurs. Similarly, if you attempt to delete a nonexistent job, an exception occurs. IMPORTANT: When you delete a job, you remove only the process that is actively monitoring and rolling up data. The API does not delete any previously rolled up data. This is by design; a user may wish to roll up a static data set. Because the data set is static, after it has been fully rolled up there is no need to keep the indexing rollup job around (as there will be no new data). Thus the job can be deleted, leaving behind the rolled up data for analysis. If you wish to also remove the rollup data and the rollup index contains the data for only a single job, you can delete the whole rollup index. If the rollup index stores data from several jobs, you must issue a delete-by-query that targets the rollup job's identifier in the rollup index. For example: ``` POST my_rollup_index/_delete_by_query { "query": { "term": { "_rollup.id": "the_rollup_job_id" } } } ``` - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-rollup-delete-job | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-rollup-delete-job | Elasticsearch API documentation} */ async deleteJob (this: That, params: T.RollupDeleteJobRequest, options?: TransportRequestOptionsWithOutMeta): Promise async deleteJob (this: That, params: T.RollupDeleteJobRequest, options?: TransportRequestOptionsWithMeta): Promise> async deleteJob (this: That, params: T.RollupDeleteJobRequest, options?: TransportRequestOptions): Promise async deleteJob (this: That, params: T.RollupDeleteJobRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id'] + const { + path: acceptedPath + } = this.acceptedParams['rollup.delete_job'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -87,13 +161,16 @@ export default class Rollup { /** * Get rollup job information. Get the configuration, stats, and status of rollup jobs. NOTE: This API returns only active (both `STARTED` and `STOPPED`) jobs. If a job was created, ran for a while, then was deleted, the API does not return any details about it. For details about a historical rollup job, the rollup capabilities API may be more useful. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-rollup-get-jobs | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-rollup-get-jobs | Elasticsearch API documentation} */ async getJobs (this: That, params?: T.RollupGetJobsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getJobs (this: That, params?: T.RollupGetJobsRequest, options?: TransportRequestOptionsWithMeta): Promise> async getJobs (this: That, params?: T.RollupGetJobsRequest, options?: TransportRequestOptions): Promise async getJobs (this: That, params?: T.RollupGetJobsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id'] + const { + path: acceptedPath + } = this.acceptedParams['rollup.get_jobs'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -137,13 +214,16 @@ export default class Rollup { /** * Get the rollup job capabilities. Get the capabilities of any rollup jobs that have been configured for a specific index or index pattern. This API is useful because a rollup job is often configured to rollup only a subset of fields from the source index. Furthermore, only certain aggregations can be configured for various fields, leading to a limited subset of functionality depending on that configuration. This API enables you to inspect an index and determine: 1. Does this index have associated rollup data somewhere in the cluster? 2. If yes to the first question, what fields were rolled up, what aggregations can be performed, and where does the data live? - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-rollup-get-rollup-caps | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-rollup-get-rollup-caps | Elasticsearch API documentation} */ async getRollupCaps (this: That, params?: T.RollupGetRollupCapsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getRollupCaps (this: That, params?: T.RollupGetRollupCapsRequest, options?: TransportRequestOptionsWithMeta): Promise> async getRollupCaps (this: That, params?: T.RollupGetRollupCapsRequest, options?: TransportRequestOptions): Promise async getRollupCaps (this: That, params?: T.RollupGetRollupCapsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id'] + const { + path: acceptedPath + } = this.acceptedParams['rollup.get_rollup_caps'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -187,13 +267,16 @@ export default class Rollup { /** * Get the rollup index capabilities. Get the rollup capabilities of all jobs inside of a rollup index. A single rollup index may store the data for multiple rollup jobs and may have a variety of capabilities depending on those jobs. This API enables you to determine: * What jobs are stored in an index (or indices specified via a pattern)? 
* What target indices were rolled up, what fields were used in those rollups, and what aggregations can be performed on each job? - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-rollup-get-rollup-index-caps | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-rollup-get-rollup-index-caps | Elasticsearch API documentation} */ async getRollupIndexCaps (this: That, params: T.RollupGetRollupIndexCapsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getRollupIndexCaps (this: That, params: T.RollupGetRollupIndexCapsRequest, options?: TransportRequestOptionsWithMeta): Promise> async getRollupIndexCaps (this: That, params: T.RollupGetRollupIndexCapsRequest, options?: TransportRequestOptions): Promise async getRollupIndexCaps (this: That, params: T.RollupGetRollupIndexCapsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] + const { + path: acceptedPath + } = this.acceptedParams['rollup.get_rollup_index_caps'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -229,14 +312,18 @@ export default class Rollup { /** * Create a rollup job. WARNING: From 8.15.0, calling this API in a cluster with no rollup usage will fail with a message about the deprecation and planned removal of rollup features. A cluster needs to contain either a rollup job or a rollup index in order for this API to be allowed to run. The rollup job configuration contains all the details about how the job should run, when it indexes documents, and what future queries will be able to run against the rollup index. There are three main sections to the job configuration: the logistical details about the job (for example, the cron schedule), the fields that are used for grouping, and what metrics to collect for each group. Jobs are created in a `STOPPED` state. You can start them with the start rollup jobs API. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-rollup-put-job | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-rollup-put-job | Elasticsearch API documentation} */ async putJob (this: That, params: T.RollupPutJobRequest, options?: TransportRequestOptionsWithOutMeta): Promise async putJob (this: That, params: T.RollupPutJobRequest, options?: TransportRequestOptionsWithMeta): Promise> async putJob (this: That, params: T.RollupPutJobRequest, options?: TransportRequestOptions): Promise async putJob (this: That, params: T.RollupPutJobRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id'] - const acceptedBody: string[] = ['cron', 'groups', 'index_pattern', 'metrics', 'page_size', 'rollup_index', 'timeout', 'headers'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['rollup.put_job'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -258,8 +345,14 @@ export default class Rollup { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? 
{} + // @ts-expect-error + body[key] = params[key] + } } } @@ -276,14 +369,18 @@ export default class Rollup { /** * Search rolled-up data. The rollup search endpoint is needed because, internally, rolled-up documents utilize a different document structure than the original data. It rewrites standard Query DSL into a format that matches the rollup documents then takes the response and rewrites it back to what a client would expect given the original query. The request body supports a subset of features from the regular search API. The following functionality is not available: `size`: Because rollups work on pre-aggregated data, no search hits can be returned and so size must be set to zero or omitted entirely. `highlighter`, `suggestors`, `post_filter`, `profile`, `explain`: These are similarly disallowed. **Searching both historical rollup and non-rollup data** The rollup search API has the capability to search across both "live" non-rollup data and the aggregated rollup data. This is done by simply adding the live indices to the URI. For example: ``` GET sensor-1,sensor_rollup/_rollup_search { "size": 0, "aggregations": { "max_temperature": { "max": { "field": "temperature" } } } } ``` The rollup search endpoint does two things when the search runs: * The original request is sent to the non-rollup index unaltered. * A rewritten version of the original request is sent to the rollup index. When the two responses are received, the endpoint rewrites the rollup response and merges the two together. During the merging process, if there is any overlap in buckets between the two responses, the buckets from the non-rollup index are used. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-rollup-rollup-search | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-rollup-rollup-search | Elasticsearch API documentation} */ async rollupSearch> (this: That, params: T.RollupRollupSearchRequest, options?: TransportRequestOptionsWithOutMeta): Promise> async rollupSearch> (this: That, params: T.RollupRollupSearchRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>> async rollupSearch> (this: That, params: T.RollupRollupSearchRequest, options?: TransportRequestOptions): Promise> async rollupSearch> (this: That, params: T.RollupRollupSearchRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] - const acceptedBody: string[] = ['aggregations', 'aggs', 'query', 'size'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['rollup.rollup_search'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -305,8 +402,14 @@ export default class Rollup { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -323,13 +426,16 @@ export default class Rollup { /** * Start rollup jobs. If you try to start a job that does not exist, an exception occurs. If you try to start a job that is already started, nothing happens. 
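A hedged sketch of the job lifecycle and the combined live/rolled-up search described above, reusing the `sensor` job and `sensor-1`/`sensor_rollup` index names from those examples (assumed to exist):

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'https://localhost:9200/', auth: { apiKey: 'REDACTED' } })

// Start the already-configured rollup job named "sensor".
await client.rollup.startJob({ id: 'sensor' })

// Search live and rolled-up data together, mirroring the _rollup_search example above.
const result = await client.rollup.rollupSearch({
  index: 'sensor-1,sensor_rollup',
  size: 0, // rollup searches cannot return hits
  aggregations: {
    max_temperature: { max: { field: 'temperature' } }
  }
})
console.log(result.aggregations)

// Stop the job and block until the indexer has fully stopped (or 10 seconds elapse).
await client.rollup.stopJob({ id: 'sensor', wait_for_completion: true, timeout: '10s' })
```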
- * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-rollup-start-job | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-rollup-start-job | Elasticsearch API documentation} */ async startJob (this: That, params: T.RollupStartJobRequest, options?: TransportRequestOptionsWithOutMeta): Promise async startJob (this: That, params: T.RollupStartJobRequest, options?: TransportRequestOptionsWithMeta): Promise> async startJob (this: That, params: T.RollupStartJobRequest, options?: TransportRequestOptions): Promise async startJob (this: That, params: T.RollupStartJobRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id'] + const { + path: acceptedPath + } = this.acceptedParams['rollup.start_job'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -365,13 +471,16 @@ export default class Rollup { /** * Stop rollup jobs. If you try to stop a job that does not exist, an exception occurs. If you try to stop a job that is already stopped, nothing happens. Since only a stopped job can be deleted, it can be useful to block the API until the indexer has fully stopped. This is accomplished with the `wait_for_completion` query parameter, and optionally a timeout. For example: ``` POST _rollup/job/sensor/_stop?wait_for_completion=true&timeout=10s ``` The parameter blocks the API call from returning until either the job has moved to STOPPED or the specified time has elapsed. If the specified time elapses without the job moving to STOPPED, a timeout exception occurs. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-rollup-stop-job | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-rollup-stop-job | Elasticsearch API documentation} */ async stopJob (this: That, params: T.RollupStopJobRequest, options?: TransportRequestOptionsWithOutMeta): Promise async stopJob (this: That, params: T.RollupStopJobRequest, options?: TransportRequestOptionsWithMeta): Promise> async stopJob (this: That, params: T.RollupStopJobRequest, options?: TransportRequestOptions): Promise async stopJob (this: That, params: T.RollupStopJobRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id'] + const { + path: acceptedPath + } = this.acceptedParams['rollup.stop_job'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} diff --git a/src/api/api/scripts_painless_execute.ts b/src/api/api/scripts_painless_execute.ts index bbafbeff1..1d524bc02 100644 --- a/src/api/api/scripts_painless_execute.ts +++ b/src/api/api/scripts_painless_execute.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. 
See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 */ /* eslint-disable import/export */ @@ -35,18 +21,39 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -interface That { transport: Transport } + +interface That { + transport: Transport +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] + +const acceptedParams: Record = { + scripts_painless_execute: { + path: [], + body: [ + 'context', + 'context_setup', + 'script' + ], + query: [] + } +} /** * Run a script. Runs a script and returns a result. Use this API to build and test scripts, such as when defining a script for a runtime field. This API requires very few dependencies and is especially useful if you don't have permissions to write documents on a cluster. The API uses several _contexts_, which control how scripts are run, what variables are available at runtime, and what the return type is. Each context requires a script, but additional parameters depend on the context you're using for that script. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/painless/master/painless-execute-api.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/reference/scripting-languages/painless/painless-api-examples | Elasticsearch API documentation} */ export default async function ScriptsPainlessExecuteApi (this: That, params?: T.ScriptsPainlessExecuteRequest, options?: TransportRequestOptionsWithOutMeta): Promise> export default async function ScriptsPainlessExecuteApi (this: That, params?: T.ScriptsPainlessExecuteRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>> export default async function ScriptsPainlessExecuteApi (this: That, params?: T.ScriptsPainlessExecuteRequest, options?: TransportRequestOptions): Promise> export default async function ScriptsPainlessExecuteApi (this: That, params?: T.ScriptsPainlessExecuteRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const acceptedBody: string[] = ['context', 'context_setup', 'script'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = acceptedParams.scripts_painless_execute + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -69,8 +76,14 @@ export default async function ScriptsPainlessExecuteApi (this } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } diff --git a/src/api/api/scroll.ts b/src/api/api/scroll.ts index 5bd03110b..1bb814335 100644 --- a/src/api/api/scroll.ts +++ b/src/api/api/scroll.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 */ /* eslint-disable import/export */ @@ -35,18 +21,42 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -interface That { transport: Transport } + +interface That { + transport: Transport +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] + +const acceptedParams: Record = { + scroll: { + path: [], + body: [ + 'scroll', + 'scroll_id' + ], + query: [ + 'scroll', + 'scroll_id', + 'rest_total_hits_as_int' + ] + } +} /** * Run a scrolling search. IMPORTANT: The scroll API is no longer recommend for deep pagination. If you need to preserve the index state while paging through more than 10,000 hits, use the `search_after` parameter with a point in time (PIT). The scroll API gets large sets of results from a single scrolling search request. To get the necessary scroll ID, submit a search API request that includes an argument for the `scroll` query parameter. The `scroll` parameter indicates how long Elasticsearch should retain the search context for the request. The search response returns a scroll ID in the `_scroll_id` response body parameter. You can then use the scroll ID with the scroll API to retrieve the next batch of results for the request. If the Elasticsearch security features are enabled, the access to the results of a specific scroll ID is restricted to the user or API key that submitted the search. You can also use the scroll API to specify a new scroll parameter that extends or shortens the retention period for the search context. IMPORTANT: Results from a scrolling search reflect the state of the index at the time of the initial search request. Subsequent indexing or document changes only affect later search and scroll requests. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-scroll | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-scroll | Elasticsearch API documentation} */ export default async function ScrollApi> (this: That, params: T.ScrollRequest, options?: TransportRequestOptionsWithOutMeta): Promise> export default async function ScrollApi> (this: That, params: T.ScrollRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>> export default async function ScrollApi> (this: That, params: T.ScrollRequest, options?: TransportRequestOptions): Promise> export default async function ScrollApi> (this: That, params: T.ScrollRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const acceptedBody: string[] = ['scroll', 'scroll_id'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = acceptedParams.scroll + const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -68,8 +78,14 @@ export default async function ScrollApi = { + search: { + path: [ + 'index' + ], + body: [ + 'aggregations', + 'aggs', + 'collapse', + 'explain', + 'ext', + 'from', + 'highlight', + 'track_total_hits', + 'indices_boost', + 'docvalue_fields', + 'knn', + 'rank', + 'min_score', + 'post_filter', + 'profile', + 'query', + 'rescore', + 'retriever', + 'script_fields', + 'search_after', + 'size', + 'slice', + 'sort', + '_source', + 'fields', + 'suggest', + 'terminate_after', + 'timeout', + 'track_scores', + 'version', + 'seq_no_primary_term', + 'stored_fields', + 'pit', + 'runtime_mappings', + 'stats' + ], + query: [ + 'allow_no_indices', + 'allow_partial_search_results', + 'analyzer', + 'analyze_wildcard', + 'batched_reduce_size', + 'ccs_minimize_roundtrips', + 'default_operator', + 'df', + 'docvalue_fields', + 'expand_wildcards', + 'explain', + 'ignore_throttled', + 'ignore_unavailable', + 'include_named_queries_score', + 'lenient', + 'max_concurrent_shard_requests', + 'preference', + 'pre_filter_shard_size', + 'request_cache', + 'routing', + 'scroll', + 'search_type', + 'stats', + 'stored_fields', + 'suggest_field', + 'suggest_mode', + 'suggest_size', + 'suggest_text', + 'terminate_after', + 'timeout', + 'track_total_hits', + 'track_scores', + 'typed_keys', + 'rest_total_hits_as_int', + 'version', + '_source', + '_source_excludes', + '_source_includes', + 'seq_no_primary_term', + 'q', + 'size', + 'from', + 'sort', + 'force_synthetic_source' + ] + } +} /** * Run a search. Get search hits that match the query defined in the request. You can provide search queries using the `q` query string parameter or the request body. If both are specified, only the query parameter is used. If the Elasticsearch security features are enabled, you must have the read index privilege for the target data stream, index, or alias. For cross-cluster search, refer to the documentation about configuring CCS privileges. To search a point in time (PIT) for an alias, you must have the `read` index privilege for the alias's data streams or indices. **Search slicing** When paging through a large number of documents, it can be helpful to split the search into multiple slices to consume them independently with the `slice` and `pit` properties. By default the splitting is done first on the shards, then locally on each shard. The local splitting partitions the shard into contiguous ranges based on Lucene document IDs. For instance if the number of shards is equal to 2 and you request 4 slices, the slices 0 and 2 are assigned to the first shard and the slices 1 and 3 are assigned to the second shard. IMPORTANT: The same point-in-time ID should be used for all slices. If different PIT IDs are used, slices can overlap and miss documents. This situation can occur because the splitting criterion is based on Lucene document IDs, which are not stable across changes to the index. 
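A minimal sketch of the slice-with-PIT pattern described above, assuming an index named `my-index` and a local cluster (both placeholders):

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'https://localhost:9200/', auth: { apiKey: 'REDACTED' } })

// Open a point in time so every slice sees the same snapshot of the index.
const pit = await client.openPointInTime({ index: 'my-index', keep_alive: '1m' })

// Consume two slices of the same search independently. Reusing one PIT id
// keeps the slices from overlapping or missing documents.
const slices = await Promise.all([0, 1].map(id =>
  client.search({
    pit: { id: pit.id, keep_alive: '1m' },
    slice: { id: String(id), max: 2 },
    query: { match_all: {} },
    sort: ['_doc'],
    size: 1000
  })
))
console.log(slices.map(s => s.hits.hits.length))

await client.closePointInTime({ id: pit.id })
```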
- * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-search | Elasticsearch API documentation} */ export default async function SearchApi> (this: That, params?: T.SearchRequest, options?: TransportRequestOptionsWithOutMeta): Promise> export default async function SearchApi> (this: That, params?: T.SearchRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>> export default async function SearchApi> (this: That, params?: T.SearchRequest, options?: TransportRequestOptions): Promise> export default async function SearchApi> (this: That, params?: T.SearchRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] - const acceptedBody: string[] = ['aggregations', 'aggs', 'collapse', 'explain', 'ext', 'from', 'highlight', 'track_total_hits', 'indices_boost', 'docvalue_fields', 'knn', 'rank', 'min_score', 'post_filter', 'profile', 'query', 'rescore', 'retriever', 'script_fields', 'search_after', 'size', 'slice', 'sort', '_source', 'fields', 'suggest', 'terminate_after', 'timeout', 'track_scores', 'version', 'seq_no_primary_term', 'stored_fields', 'pit', 'runtime_mappings', 'stats'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = acceptedParams.search + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -73,8 +159,14 @@ export default async function SearchApi +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] export default class SearchApplication { transport: Transport + acceptedParams: Record constructor (transport: Transport) { this.transport = transport + this.acceptedParams = { + 'search_application.delete': { + path: [ + 'name' + ], + body: [], + query: [] + }, + 'search_application.delete_behavioral_analytics': { + path: [ + 'name' + ], + body: [], + query: [] + }, + 'search_application.get': { + path: [ + 'name' + ], + body: [], + query: [] + }, + 'search_application.get_behavioral_analytics': { + path: [ + 'name' + ], + body: [], + query: [] + }, + 'search_application.list': { + path: [], + body: [], + query: [ + 'q', + 'from', + 'size' + ] + }, + 'search_application.post_behavioral_analytics_event': { + path: [ + 'collection_name', + 'event_type' + ], + body: [ + 'payload' + ], + query: [ + 'debug' + ] + }, + 'search_application.put': { + path: [ + 'name' + ], + body: [ + 'search_application' + ], + query: [ + 'create' + ] + }, + 'search_application.put_behavioral_analytics': { + path: [ + 'name' + ], + body: [], + query: [] + }, + 'search_application.render_query': { + path: [ + 'name' + ], + body: [ + 'params' + ], + query: [] + }, + 'search_application.search': { + path: [ + 'name' + ], + body: [ + 'params' + ], + query: [ + 'typed_keys' + ] + } + } } /** * Delete a search application. Remove a search application and its associated alias. Indices attached to the search application are not removed. 
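A hedged end-to-end sketch for this namespace (create, query, delete); the application name, backing index, and template are placeholders, and the backing index is assumed to exist:

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'https://localhost:9200/', auth: { apiKey: 'REDACTED' } })

// Create or update a search application backed by an existing "products" index (placeholder names).
await client.searchApplication.put({
  name: 'my-app',
  search_application: {
    indices: ['products'],
    template: {
      script: {
        lang: 'mustache',
        source: '{ "query": { "query_string": { "query": "{{query_string}}" } } }',
        params: { query_string: '*' }
      }
    }
  }
})

// Run a search through the application's template.
const result = await client.searchApplication.search({
  name: 'my-app',
  params: { query_string: 'wireless headphones' }
})
console.log(result.hits.total)

// Remove the application and its alias; the attached indices are not removed.
await client.searchApplication.delete({ name: 'my-app' })
```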
- * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-application-delete | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-search-application-delete | Elasticsearch API documentation} */ async delete (this: That, params: T.SearchApplicationDeleteRequest, options?: TransportRequestOptionsWithOutMeta): Promise async delete (this: That, params: T.SearchApplicationDeleteRequest, options?: TransportRequestOptionsWithMeta): Promise> async delete (this: That, params: T.SearchApplicationDeleteRequest, options?: TransportRequestOptions): Promise async delete (this: That, params: T.SearchApplicationDeleteRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] + const { + path: acceptedPath + } = this.acceptedParams['search_application.delete'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -87,13 +172,16 @@ export default class SearchApplication { /** * Delete a behavioral analytics collection. The associated data stream is also deleted. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-application-delete-behavioral-analytics | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-search-application-delete-behavioral-analytics | Elasticsearch API documentation} */ async deleteBehavioralAnalytics (this: That, params: T.SearchApplicationDeleteBehavioralAnalyticsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async deleteBehavioralAnalytics (this: That, params: T.SearchApplicationDeleteBehavioralAnalyticsRequest, options?: TransportRequestOptionsWithMeta): Promise> async deleteBehavioralAnalytics (this: That, params: T.SearchApplicationDeleteBehavioralAnalyticsRequest, options?: TransportRequestOptions): Promise async deleteBehavioralAnalytics (this: That, params: T.SearchApplicationDeleteBehavioralAnalyticsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] + const { + path: acceptedPath + } = this.acceptedParams['search_application.delete_behavioral_analytics'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -129,13 +217,16 @@ export default class SearchApplication { /** * Get search application details. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-application-get | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-search-application-get | Elasticsearch API documentation} */ async get (this: That, params: T.SearchApplicationGetRequest, options?: TransportRequestOptionsWithOutMeta): Promise async get (this: That, params: T.SearchApplicationGetRequest, options?: TransportRequestOptionsWithMeta): Promise> async get (this: That, params: T.SearchApplicationGetRequest, options?: TransportRequestOptions): Promise async get (this: That, params: T.SearchApplicationGetRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] + const { + path: acceptedPath + } = this.acceptedParams['search_application.get'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -171,13 +262,16 @@ export default class SearchApplication { /** * Get behavioral analytics collections. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-application-get-behavioral-analytics | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-search-application-get-behavioral-analytics | Elasticsearch API documentation} */ async getBehavioralAnalytics (this: That, params?: T.SearchApplicationGetBehavioralAnalyticsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getBehavioralAnalytics (this: That, params?: T.SearchApplicationGetBehavioralAnalyticsRequest, options?: TransportRequestOptionsWithMeta): Promise> async getBehavioralAnalytics (this: That, params?: T.SearchApplicationGetBehavioralAnalyticsRequest, options?: TransportRequestOptions): Promise async getBehavioralAnalytics (this: That, params?: T.SearchApplicationGetBehavioralAnalyticsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] + const { + path: acceptedPath + } = this.acceptedParams['search_application.get_behavioral_analytics'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -221,13 +315,16 @@ export default class SearchApplication { /** * Get search applications. Get information about search applications. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-application-get-behavioral-analytics | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-search-application-get-behavioral-analytics | Elasticsearch API documentation} */ async list (this: That, params?: T.SearchApplicationListRequest, options?: TransportRequestOptionsWithOutMeta): Promise async list (this: That, params?: T.SearchApplicationListRequest, options?: TransportRequestOptionsWithMeta): Promise> async list (this: That, params?: T.SearchApplicationListRequest, options?: TransportRequestOptions): Promise async list (this: That, params?: T.SearchApplicationListRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] + const { + path: acceptedPath + } = this.acceptedParams['search_application.list'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -261,14 +358,18 @@ export default class SearchApplication { /** * Create a behavioral analytics collection event. 
- * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-application-post-behavioral-analytics-event | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-search-application-post-behavioral-analytics-event | Elasticsearch API documentation} */ async postBehavioralAnalyticsEvent (this: That, params: T.SearchApplicationPostBehavioralAnalyticsEventRequest, options?: TransportRequestOptionsWithOutMeta): Promise async postBehavioralAnalyticsEvent (this: That, params: T.SearchApplicationPostBehavioralAnalyticsEventRequest, options?: TransportRequestOptionsWithMeta): Promise> async postBehavioralAnalyticsEvent (this: That, params: T.SearchApplicationPostBehavioralAnalyticsEventRequest, options?: TransportRequestOptions): Promise async postBehavioralAnalyticsEvent (this: That, params: T.SearchApplicationPostBehavioralAnalyticsEventRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['collection_name', 'event_type'] - const acceptedBody: string[] = ['payload'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['search_application.post_behavioral_analytics_event'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -280,8 +381,14 @@ export default class SearchApplication { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -299,14 +406,18 @@ export default class SearchApplication { /** * Create or update a search application. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-application-put | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-search-application-put | Elasticsearch API documentation} */ async put (this: That, params: T.SearchApplicationPutRequest, options?: TransportRequestOptionsWithOutMeta): Promise async put (this: That, params: T.SearchApplicationPutRequest, options?: TransportRequestOptionsWithMeta): Promise> async put (this: That, params: T.SearchApplicationPutRequest, options?: TransportRequestOptions): Promise async put (this: That, params: T.SearchApplicationPutRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] - const acceptedBody: string[] = ['search_application'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['search_application.put'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -318,8 +429,14 @@ export default class SearchApplication { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -336,13 +453,16 @@ export default class SearchApplication { /** * Create a behavioral analytics collection. 
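A hedged sketch for the behavioral analytics endpoints; the collection name is a placeholder and the event payload is illustrative only (the exact required fields depend on the event type's schema):

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'https://localhost:9200/', auth: { apiKey: 'REDACTED' } })

// Create an analytics collection (placeholder name).
await client.searchApplication.putBehavioralAnalytics({ name: 'my-analytics' })

// Record a user interaction against the collection. The payload below is
// illustrative; check the event schema for the fields your event type requires.
await client.searchApplication.postBehavioralAnalyticsEvent({
  collection_name: 'my-analytics',
  event_type: 'search_click',
  payload: {
    session: { id: 'session-1' },
    user: { id: 'user-42' },
    search: { query: 'wireless headphones' }
  },
  debug: true // echo the stored event back in the response
})
```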
- * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-application-put-behavioral-analytics | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-search-application-put-behavioral-analytics | Elasticsearch API documentation} */ async putBehavioralAnalytics (this: That, params: T.SearchApplicationPutBehavioralAnalyticsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async putBehavioralAnalytics (this: That, params: T.SearchApplicationPutBehavioralAnalyticsRequest, options?: TransportRequestOptionsWithMeta): Promise> async putBehavioralAnalytics (this: That, params: T.SearchApplicationPutBehavioralAnalyticsRequest, options?: TransportRequestOptions): Promise async putBehavioralAnalytics (this: That, params: T.SearchApplicationPutBehavioralAnalyticsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] + const { + path: acceptedPath + } = this.acceptedParams['search_application.put_behavioral_analytics'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -378,14 +498,18 @@ export default class SearchApplication { /** * Render a search application query. Generate an Elasticsearch query using the specified query parameters and the search template associated with the search application or a default template if none is specified. If a parameter used in the search template is not specified in `params`, the parameter's default value will be used. The API returns the specific Elasticsearch query that would be generated and run by calling the search application search API. You must have `read` privileges on the backing alias of the search application. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-application-render-query | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-search-application-render-query | Elasticsearch API documentation} */ async renderQuery (this: That, params: T.SearchApplicationRenderQueryRequest, options?: TransportRequestOptionsWithOutMeta): Promise async renderQuery (this: That, params: T.SearchApplicationRenderQueryRequest, options?: TransportRequestOptionsWithMeta): Promise> async renderQuery (this: That, params: T.SearchApplicationRenderQueryRequest, options?: TransportRequestOptions): Promise async renderQuery (this: That, params: T.SearchApplicationRenderQueryRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] - const acceptedBody: string[] = ['params'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['search_application.render_query'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -407,8 +531,14 @@ export default class SearchApplication { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -425,14 +555,18 @@ export default class SearchApplication { /** * Run a search application search. 
Generate and run an Elasticsearch query that uses the specified query parameteter and the search template associated with the search application or default template. Unspecified template parameters are assigned their default values if applicable. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-application-search | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-search-application-search | Elasticsearch API documentation} */ async search> (this: That, params: T.SearchApplicationSearchRequest, options?: TransportRequestOptionsWithOutMeta): Promise> async search> (this: That, params: T.SearchApplicationSearchRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>> async search> (this: That, params: T.SearchApplicationSearchRequest, options?: TransportRequestOptions): Promise> async search> (this: That, params: T.SearchApplicationSearchRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] - const acceptedBody: string[] = ['params'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['search_application.search'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -454,8 +588,14 @@ export default class SearchApplication { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } diff --git a/src/api/api/search_mvt.ts b/src/api/api/search_mvt.ts index c9384a91e..57f32b7ce 100644 --- a/src/api/api/search_mvt.ts +++ b/src/api/api/search_mvt.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. 
and contributors + * SPDX-License-Identifier: Apache-2.0 */ /* eslint-disable import/export */ @@ -35,18 +21,64 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -interface That { transport: Transport } + +interface That { + transport: Transport +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] + +const acceptedParams: Record = { + search_mvt: { + path: [ + 'index', + 'field', + 'zoom', + 'x', + 'y' + ], + body: [ + 'aggs', + 'buffer', + 'exact_bounds', + 'extent', + 'fields', + 'grid_agg', + 'grid_precision', + 'grid_type', + 'query', + 'runtime_mappings', + 'size', + 'sort', + 'track_total_hits', + 'with_labels' + ], + query: [ + 'exact_bounds', + 'extent', + 'grid_agg', + 'grid_precision', + 'grid_type', + 'size', + 'with_labels' + ] + } +} /** * Search a vector tile. Search a vector tile for geospatial values. Before using this API, you should be familiar with the Mapbox vector tile specification. The API returns results as a binary mapbox vector tile. Internally, Elasticsearch translates a vector tile search API request into a search containing: * A `geo_bounding_box` query on the ``. The query uses the `//` tile as a bounding box. * A `geotile_grid` or `geohex_grid` aggregation on the ``. The `grid_agg` parameter determines the aggregation type. The aggregation uses the `//` tile as a bounding box. * Optionally, a `geo_bounds` aggregation on the ``. The search only includes this aggregation if the `exact_bounds` parameter is `true`. * If the optional parameter `with_labels` is `true`, the internal search will include a dynamic runtime field that calls the `getLabelPosition` function of the geometry doc value. This enables the generation of new point features containing suggested geometry labels, so that, for example, multi-polygons will have only one label. For example, Elasticsearch may translate a vector tile search API request with a `grid_agg` argument of `geotile` and an `exact_bounds` argument of `true` into the following search ``` GET my-index/_search { "size": 10000, "query": { "geo_bounding_box": { "my-geo-field": { "top_left": { "lat": -40.979898069620134, "lon": -45 }, "bottom_right": { "lat": -66.51326044311186, "lon": 0 } } } }, "aggregations": { "grid": { "geotile_grid": { "field": "my-geo-field", "precision": 11, "size": 65536, "bounds": { "top_left": { "lat": -40.979898069620134, "lon": -45 }, "bottom_right": { "lat": -66.51326044311186, "lon": 0 } } } }, "bounds": { "geo_bounds": { "field": "my-geo-field", "wrap_longitude": false } } } } ``` The API returns results as a binary Mapbox vector tile. Mapbox vector tiles are encoded as Google Protobufs (PBF). By default, the tile contains three layers: * A `hits` layer containing a feature for each `` value matching the `geo_bounding_box` query. * An `aggs` layer containing a feature for each cell of the `geotile_grid` or `geohex_grid`. The layer only contains features for cells with matching data. * A meta layer containing: * A feature containing a bounding box. By default, this is the bounding box of the tile. * Value ranges for any sub-aggregations on the `geotile_grid` or `geohex_grid`. * Metadata for the search. The API only returns features that can display at its zoom level. For example, if a polygon feature has no area at its zoom level, the API omits it. The API returns errors as UTF-8 encoded JSON. IMPORTANT: You can specify several options for this API as either a query parameter or request body parameter. 
If you specify both parameters, the query parameter takes precedence. **Grid precision for geotile** For a `grid_agg` of `geotile`, you can use cells in the `aggs` layer as tiles for lower zoom levels. `grid_precision` represents the additional zoom levels available through these cells. The final precision is computed by as follows: ` + grid_precision`. For example, if `` is 7 and `grid_precision` is 8, then the `geotile_grid` aggregation will use a precision of 15. The maximum final precision is 29. The `grid_precision` also determines the number of cells for the grid as follows: `(2^grid_precision) x (2^grid_precision)`. For example, a value of 8 divides the tile into a grid of 256 x 256 cells. The `aggs` layer only contains features for cells with matching data. **Grid precision for geohex** For a `grid_agg` of `geohex`, Elasticsearch uses `` and `grid_precision` to calculate a final precision as follows: ` + grid_precision`. This precision determines the H3 resolution of the hexagonal cells produced by the `geohex` aggregation. The following table maps the H3 resolution for each precision. For example, if `` is 3 and `grid_precision` is 3, the precision is 6. At a precision of 6, hexagonal cells have an H3 resolution of 2. If `` is 3 and `grid_precision` is 4, the precision is 7. At a precision of 7, hexagonal cells have an H3 resolution of 3. | Precision | Unique tile bins | H3 resolution | Unique hex bins | Ratio | | --------- | ---------------- | ------------- | ----------------| ----- | | 1 | 4 | 0 | 122 | 30.5 | | 2 | 16 | 0 | 122 | 7.625 | | 3 | 64 | 1 | 842 | 13.15625 | | 4 | 256 | 1 | 842 | 3.2890625 | | 5 | 1024 | 2 | 5882 | 5.744140625 | | 6 | 4096 | 2 | 5882 | 1.436035156 | | 7 | 16384 | 3 | 41162 | 2.512329102 | | 8 | 65536 | 3 | 41162 | 0.6280822754 | | 9 | 262144 | 4 | 288122 | 1.099098206 | | 10 | 1048576 | 4 | 288122 | 0.2747745514 | | 11 | 4194304 | 5 | 2016842 | 0.4808526039 | | 12 | 16777216 | 6 | 14117882 | 0.8414913416 | | 13 | 67108864 | 6 | 14117882 | 0.2103728354 | | 14 | 268435456 | 7 | 98825162 | 0.3681524172 | | 15 | 1073741824 | 8 | 691776122 | 0.644266719 | | 16 | 4294967296 | 8 | 691776122 | 0.1610666797 | | 17 | 17179869184 | 9 | 4842432842 | 0.2818666889 | | 18 | 68719476736 | 10 | 33897029882 | 0.4932667053 | | 19 | 274877906944 | 11 | 237279209162 | 0.8632167343 | | 20 | 1099511627776 | 11 | 237279209162 | 0.2158041836 | | 21 | 4398046511104 | 12 | 1660954464122 | 0.3776573213 | | 22 | 17592186044416 | 13 | 11626681248842 | 0.6609003122 | | 23 | 70368744177664 | 13 | 11626681248842 | 0.165225078 | | 24 | 281474976710656 | 14 | 81386768741882 | 0.2891438866 | | 25 | 1125899906842620 | 15 | 569707381193162 | 0.5060018015 | | 26 | 4503599627370500 | 15 | 569707381193162 | 0.1265004504 | | 27 | 18014398509482000 | 15 | 569707381193162 | 0.03162511259 | | 28 | 72057594037927900 | 15 | 569707381193162 | 0.007906278149 | | 29 | 288230376151712000 | 15 | 569707381193162 | 0.001976569537 | Hexagonal cells don't align perfectly on a vector tile. Some cells may intersect more than one vector tile. To compute the H3 resolution for each precision, Elasticsearch compares the average density of hexagonal bins at each resolution with the average density of tile bins at each zoom level. Elasticsearch uses the H3 resolution that is closest to the corresponding geotile density. 
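As a hedged illustration of the vector tile API documented above: the sketch below assumes a reachable cluster at a placeholder URL and a hypothetical `my-geo-index` with a geo field named `location`; the parameter names mirror the `search_mvt` entry in `acceptedParams`, and the flat request object is split into path, query, and body parts by the routing shown in the hunks that follow.

```
import { Client } from '@elastic/elasticsearch'

// Placeholder connection details; any real cluster address and auth would do.
const client = new Client({ node: 'http://localhost:9200/' })

// index/field/zoom/x/y are path parameters; grid_agg, grid_precision,
// exact_bounds and with_labels are accepted on the query string; other
// fields such as `query` would be routed into the request body.
// The response is a binary Mapbox vector tile.
const tile = await client.searchMvt({
  index: 'my-geo-index',
  field: 'location',
  zoom: 7,
  x: 64,
  y: 43,
  grid_agg: 'geotile',
  grid_precision: 8,
  exact_bounds: true,
  with_labels: true
})
```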
- * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-mvt | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-search-mvt | Elasticsearch API documentation} */ export default async function SearchMvtApi (this: That, params: T.SearchMvtRequest, options?: TransportRequestOptionsWithOutMeta): Promise export default async function SearchMvtApi (this: That, params: T.SearchMvtRequest, options?: TransportRequestOptionsWithMeta): Promise> export default async function SearchMvtApi (this: That, params: T.SearchMvtRequest, options?: TransportRequestOptions): Promise export default async function SearchMvtApi (this: That, params: T.SearchMvtRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index', 'field', 'zoom', 'x', 'y'] - const acceptedBody: string[] = ['aggs', 'buffer', 'exact_bounds', 'extent', 'fields', 'grid_agg', 'grid_precision', 'grid_type', 'query', 'runtime_mappings', 'size', 'sort', 'track_total_hits', 'with_labels'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = acceptedParams.search_mvt + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -68,8 +100,14 @@ export default async function SearchMvtApi (this: That, params: T.SearchMvtReque } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } diff --git a/src/api/api/search_shards.ts b/src/api/api/search_shards.ts index f2fff30a5..e82889be1 100644 --- a/src/api/api/search_shards.ts +++ b/src/api/api/search_shards.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 */ /* eslint-disable import/export */ @@ -35,17 +21,41 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -interface That { transport: Transport } + +interface That { + transport: Transport +} + +const acceptedParams: Record = { + search_shards: { + path: [ + 'index' + ], + body: [], + query: [ + 'allow_no_indices', + 'expand_wildcards', + 'ignore_unavailable', + 'local', + 'master_timeout', + 'preference', + 'routing' + ] + } +} /** * Get the search shards. Get the indices and shards that a search request would be run against. This information can be useful for working out issues or planning optimizations with routing and shard preferences. 
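Every migrated method above repeats the same split between query string and body. Below is a minimal standalone re-statement of that rule with illustrative names; it omits the `body` and `querystring` escape hatches that the generated methods also handle, so it is a sketch of the idea rather than the client's actual internals.

```
// Per-request parameters the client always accepts on the query string.
const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty']

// Given the accepted path and query parameter names for one endpoint,
// route every key of a flat request object: path parameters are skipped
// (they are interpolated into the URL elsewhere), known query parameters
// go to the query string, and anything else goes to the request body.
function splitParams (
  params: Record<string, unknown>,
  acceptedPath: string[],
  acceptedQuery: string[]
): { querystring: Record<string, unknown>, body: Record<string, unknown> } {
  const querystring: Record<string, unknown> = {}
  const body: Record<string, unknown> = {}
  for (const key of Object.keys(params)) {
    if (acceptedPath.includes(key)) continue
    if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) {
      querystring[key] = params[key]
    } else {
      body[key] = params[key]
    }
  }
  return { querystring, body }
}

// For search_mvt, for example, `exact_bounds` lands on the query string
// while `query` lands in the body.
console.log(splitParams({ index: 'my-geo-index', exact_bounds: true, query: {} }, ['index'], ['exact_bounds']))
```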
When filtered aliases are used, the filter is returned as part of the `indices` section. If the Elasticsearch security features are enabled, you must have the `view_index_metadata` or `manage` index privilege for the target data stream, index, or alias. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-shards | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-search-shards | Elasticsearch API documentation} */ export default async function SearchShardsApi (this: That, params?: T.SearchShardsRequest, options?: TransportRequestOptionsWithOutMeta): Promise export default async function SearchShardsApi (this: That, params?: T.SearchShardsRequest, options?: TransportRequestOptionsWithMeta): Promise> export default async function SearchShardsApi (this: That, params?: T.SearchShardsRequest, options?: TransportRequestOptions): Promise export default async function SearchShardsApi (this: That, params?: T.SearchShardsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] + const { + path: acceptedPath + } = acceptedParams.search_shards + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} diff --git a/src/api/api/search_template.ts b/src/api/api/search_template.ts index f63c77a45..1b4d9e066 100644 --- a/src/api/api/search_template.ts +++ b/src/api/api/search_template.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 */ /* eslint-disable import/export */ @@ -35,18 +21,57 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -interface That { transport: Transport } + +interface That { + transport: Transport +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] + +const acceptedParams: Record = { + search_template: { + path: [ + 'index' + ], + body: [ + 'explain', + 'id', + 'params', + 'profile', + 'source' + ], + query: [ + 'allow_no_indices', + 'ccs_minimize_roundtrips', + 'expand_wildcards', + 'explain', + 'ignore_throttled', + 'ignore_unavailable', + 'preference', + 'profile', + 'routing', + 'scroll', + 'search_type', + 'rest_total_hits_as_int', + 'typed_keys' + ] + } +} /** * Run a search with a search template. 
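A hedged sketch of running a stored search template through the client, assuming a template with the hypothetical id `my-search-template` has already been stored; `id`, `params`, `explain` and `profile` are body parameters in the `search_template` entry above, so they can be passed flat alongside path and query parameters.

```
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' }) // placeholder node

// `index` is a path parameter; `id` and `params` travel in the body;
// `typed_keys` is a query parameter.
const response = await client.searchTemplate({
  index: 'my-index',
  id: 'my-search-template',
  params: { query_string: 'hello world', from: 0, size: 10 },
  typed_keys: true
})
console.log(response.hits?.hits)
```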
- * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-search-template | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-search-template | Elasticsearch API documentation} */ export default async function SearchTemplateApi (this: That, params?: T.SearchTemplateRequest, options?: TransportRequestOptionsWithOutMeta): Promise> export default async function SearchTemplateApi (this: That, params?: T.SearchTemplateRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>> export default async function SearchTemplateApi (this: That, params?: T.SearchTemplateRequest, options?: TransportRequestOptions): Promise> export default async function SearchTemplateApi (this: That, params?: T.SearchTemplateRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] - const acceptedBody: string[] = ['explain', 'id', 'params', 'profile', 'source'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = acceptedParams.search_template + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -69,8 +94,14 @@ export default async function SearchTemplateApi (this: That } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } diff --git a/src/api/api/searchable_snapshots.ts b/src/api/api/searchable_snapshots.ts index 4c8af1dda..0d642e793 100644 --- a/src/api/api/searchable_snapshots.ts +++ b/src/api/api/searchable_snapshots.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. 
and contributors + * SPDX-License-Identifier: Apache-2.0 */ /* eslint-disable import/export */ @@ -35,23 +21,81 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -interface That { transport: Transport } + +interface That { + transport: Transport + acceptedParams: Record +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] export default class SearchableSnapshots { transport: Transport + acceptedParams: Record constructor (transport: Transport) { this.transport = transport + this.acceptedParams = { + 'searchable_snapshots.cache_stats': { + path: [ + 'node_id' + ], + body: [], + query: [ + 'master_timeout' + ] + }, + 'searchable_snapshots.clear_cache': { + path: [ + 'index' + ], + body: [], + query: [ + 'expand_wildcards', + 'allow_no_indices', + 'ignore_unavailable' + ] + }, + 'searchable_snapshots.mount': { + path: [ + 'repository', + 'snapshot' + ], + body: [ + 'index', + 'renamed_index', + 'index_settings', + 'ignore_index_settings' + ], + query: [ + 'master_timeout', + 'wait_for_completion', + 'storage' + ] + }, + 'searchable_snapshots.stats': { + path: [ + 'index' + ], + body: [], + query: [ + 'level' + ] + } + } } /** * Get cache statistics. Get statistics about the shared cache for partially mounted indices. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-searchable-snapshots-cache-stats | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-searchable-snapshots-cache-stats | Elasticsearch API documentation} */ async cacheStats (this: That, params?: T.SearchableSnapshotsCacheStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async cacheStats (this: That, params?: T.SearchableSnapshotsCacheStatsRequest, options?: TransportRequestOptionsWithMeta): Promise> async cacheStats (this: That, params?: T.SearchableSnapshotsCacheStatsRequest, options?: TransportRequestOptions): Promise async cacheStats (this: That, params?: T.SearchableSnapshotsCacheStatsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['node_id'] + const { + path: acceptedPath + } = this.acceptedParams['searchable_snapshots.cache_stats'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -95,13 +139,16 @@ export default class SearchableSnapshots { /** * Clear the cache. Clear indices and data streams from the shared cache for partially mounted indices. 
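The clear-cache call takes only a path parameter and a few query flags, all listed in the `searchable_snapshots.clear_cache` entry above. A hedged sketch with placeholder index names:

```
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' }) // placeholder node

// `index` is a path parameter; the remaining flags are query parameters.
await client.searchableSnapshots.clearCache({
  index: 'my-partially-mounted-index',
  expand_wildcards: 'open',
  allow_no_indices: true,
  ignore_unavailable: false
})
```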
- * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-searchable-snapshots-clear-cache | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-searchable-snapshots-clear-cache | Elasticsearch API documentation} */ async clearCache (this: That, params?: T.SearchableSnapshotsClearCacheRequest, options?: TransportRequestOptionsWithOutMeta): Promise async clearCache (this: That, params?: T.SearchableSnapshotsClearCacheRequest, options?: TransportRequestOptionsWithMeta): Promise> async clearCache (this: That, params?: T.SearchableSnapshotsClearCacheRequest, options?: TransportRequestOptions): Promise async clearCache (this: That, params?: T.SearchableSnapshotsClearCacheRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] + const { + path: acceptedPath + } = this.acceptedParams['searchable_snapshots.clear_cache'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -145,14 +192,18 @@ export default class SearchableSnapshots { /** * Mount a snapshot. Mount a snapshot as a searchable snapshot index. Do not use this API for snapshots managed by index lifecycle management (ILM). Manually mounting ILM-managed snapshots can interfere with ILM processes. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-searchable-snapshots-mount | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-searchable-snapshots-mount | Elasticsearch API documentation} */ async mount (this: That, params: T.SearchableSnapshotsMountRequest, options?: TransportRequestOptionsWithOutMeta): Promise async mount (this: That, params: T.SearchableSnapshotsMountRequest, options?: TransportRequestOptionsWithMeta): Promise> async mount (this: That, params: T.SearchableSnapshotsMountRequest, options?: TransportRequestOptions): Promise async mount (this: That, params: T.SearchableSnapshotsMountRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['repository', 'snapshot'] - const acceptedBody: string[] = ['index', 'renamed_index', 'index_settings', 'ignore_index_settings'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['searchable_snapshots.mount'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -174,8 +225,14 @@ export default class SearchableSnapshots { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -193,13 +250,16 @@ export default class SearchableSnapshots { /** * Get searchable snapshot statistics. 
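Going back to the mount API in the hunk just above, here is a hedged sketch that assumes a hypothetical repository `my-repo` and snapshot `snap-1`; the body and query parameters match the `searchable_snapshots.mount` entry shown earlier.

```
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' }) // placeholder node

// `repository` and `snapshot` are path parameters; `index`, `renamed_index`,
// `index_settings` and `ignore_index_settings` travel in the body;
// `wait_for_completion` and `storage` are query parameters.
const response = await client.searchableSnapshots.mount({
  repository: 'my-repo',
  snapshot: 'snap-1',
  index: 'my-index',
  renamed_index: 'my-mounted-index',
  index_settings: { 'index.number_of_replicas': 0 },
  storage: 'shared_cache',
  wait_for_completion: true
})
console.log(response.snapshot)
```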
- * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-searchable-snapshots-stats | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-searchable-snapshots-stats | Elasticsearch API documentation} */ async stats (this: That, params?: T.SearchableSnapshotsStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async stats (this: That, params?: T.SearchableSnapshotsStatsRequest, options?: TransportRequestOptionsWithMeta): Promise> async stats (this: That, params?: T.SearchableSnapshotsStatsRequest, options?: TransportRequestOptions): Promise async stats (this: That, params?: T.SearchableSnapshotsStatsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] + const { + path: acceptedPath + } = this.acceptedParams['searchable_snapshots.stats'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} diff --git a/src/api/api/security.ts b/src/api/api/security.ts index 3484f5933..850ac0095 100644 --- a/src/api/api/security.ts +++ b/src/api/api/security.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. 
and contributors + * SPDX-License-Identifier: Apache-2.0 */ /* eslint-disable import/export */ @@ -35,24 +21,664 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -interface That { transport: Transport } + +interface That { + transport: Transport + acceptedParams: Record +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] export default class Security { transport: Transport + acceptedParams: Record constructor (transport: Transport) { this.transport = transport + this.acceptedParams = { + 'security.activate_user_profile': { + path: [], + body: [ + 'access_token', + 'grant_type', + 'password', + 'username' + ], + query: [] + }, + 'security.authenticate': { + path: [], + body: [], + query: [] + }, + 'security.bulk_delete_role': { + path: [], + body: [ + 'names' + ], + query: [ + 'refresh' + ] + }, + 'security.bulk_put_role': { + path: [], + body: [ + 'roles' + ], + query: [ + 'refresh' + ] + }, + 'security.bulk_update_api_keys': { + path: [], + body: [ + 'expiration', + 'ids', + 'metadata', + 'role_descriptors' + ], + query: [] + }, + 'security.change_password': { + path: [ + 'username' + ], + body: [ + 'password', + 'password_hash' + ], + query: [ + 'refresh' + ] + }, + 'security.clear_api_key_cache': { + path: [ + 'ids' + ], + body: [], + query: [] + }, + 'security.clear_cached_privileges': { + path: [ + 'application' + ], + body: [], + query: [] + }, + 'security.clear_cached_realms': { + path: [ + 'realms' + ], + body: [], + query: [ + 'usernames' + ] + }, + 'security.clear_cached_roles': { + path: [ + 'name' + ], + body: [], + query: [] + }, + 'security.clear_cached_service_tokens': { + path: [ + 'namespace', + 'service', + 'name' + ], + body: [], + query: [] + }, + 'security.create_api_key': { + path: [], + body: [ + 'expiration', + 'name', + 'role_descriptors', + 'metadata' + ], + query: [ + 'refresh' + ] + }, + 'security.create_cross_cluster_api_key': { + path: [], + body: [ + 'access', + 'expiration', + 'metadata', + 'name' + ], + query: [] + }, + 'security.create_service_token': { + path: [ + 'namespace', + 'service', + 'name' + ], + body: [], + query: [ + 'refresh' + ] + }, + 'security.delegate_pki': { + path: [], + body: [ + 'x509_certificate_chain' + ], + query: [] + }, + 'security.delete_privileges': { + path: [ + 'application', + 'name' + ], + body: [], + query: [ + 'refresh' + ] + }, + 'security.delete_role': { + path: [ + 'name' + ], + body: [], + query: [ + 'refresh' + ] + }, + 'security.delete_role_mapping': { + path: [ + 'name' + ], + body: [], + query: [ + 'refresh' + ] + }, + 'security.delete_service_token': { + path: [ + 'namespace', + 'service', + 'name' + ], + body: [], + query: [ + 'refresh' + ] + }, + 'security.delete_user': { + path: [ + 'username' + ], + body: [], + query: [ + 'refresh' + ] + }, + 'security.disable_user': { + path: [ + 'username' + ], + body: [], + query: [ + 'refresh' + ] + }, + 'security.disable_user_profile': { + path: [ + 'uid' + ], + body: [], + query: [ + 'refresh' + ] + }, + 'security.enable_user': { + path: [ + 'username' + ], + body: [], + query: [ + 'refresh' + ] + }, + 'security.enable_user_profile': { + path: [ + 'uid' + ], + body: [], + query: [ + 'refresh' + ] + }, + 'security.enroll_kibana': { + path: [], + body: [], + query: [] + }, + 'security.enroll_node': { + path: [], + body: [], + query: [] + }, + 'security.get_api_key': { + path: [], + body: [], + query: [ + 'id', + 'name', + 'owner', + 'realm_name', + 'username', + 'with_limited_by', + 'active_only', + 
'with_profile_uid' + ] + }, + 'security.get_builtin_privileges': { + path: [], + body: [], + query: [] + }, + 'security.get_privileges': { + path: [ + 'application', + 'name' + ], + body: [], + query: [] + }, + 'security.get_role': { + path: [ + 'name' + ], + body: [], + query: [] + }, + 'security.get_role_mapping': { + path: [ + 'name' + ], + body: [], + query: [] + }, + 'security.get_service_accounts': { + path: [ + 'namespace', + 'service' + ], + body: [], + query: [] + }, + 'security.get_service_credentials': { + path: [ + 'namespace', + 'service' + ], + body: [], + query: [] + }, + 'security.get_settings': { + path: [], + body: [], + query: [ + 'master_timeout' + ] + }, + 'security.get_token': { + path: [], + body: [ + 'grant_type', + 'scope', + 'password', + 'kerberos_ticket', + 'refresh_token', + 'username' + ], + query: [] + }, + 'security.get_user': { + path: [ + 'username' + ], + body: [], + query: [ + 'with_profile_uid' + ] + }, + 'security.get_user_privileges': { + path: [], + body: [], + query: [ + 'application', + 'priviledge', + 'username' + ] + }, + 'security.get_user_profile': { + path: [ + 'uid' + ], + body: [], + query: [ + 'data' + ] + }, + 'security.grant_api_key': { + path: [], + body: [ + 'api_key', + 'grant_type', + 'access_token', + 'username', + 'password', + 'run_as' + ], + query: [] + }, + 'security.has_privileges': { + path: [ + 'user' + ], + body: [ + 'application', + 'cluster', + 'index' + ], + query: [] + }, + 'security.has_privileges_user_profile': { + path: [], + body: [ + 'uids', + 'privileges' + ], + query: [] + }, + 'security.invalidate_api_key': { + path: [], + body: [ + 'id', + 'ids', + 'name', + 'owner', + 'realm_name', + 'username' + ], + query: [] + }, + 'security.invalidate_token': { + path: [], + body: [ + 'token', + 'refresh_token', + 'realm_name', + 'username' + ], + query: [] + }, + 'security.oidc_authenticate': { + path: [], + body: [ + 'nonce', + 'realm', + 'redirect_uri', + 'state' + ], + query: [] + }, + 'security.oidc_logout': { + path: [], + body: [ + 'token', + 'refresh_token' + ], + query: [] + }, + 'security.oidc_prepare_authentication': { + path: [], + body: [ + 'iss', + 'login_hint', + 'nonce', + 'realm', + 'state' + ], + query: [] + }, + 'security.put_privileges': { + path: [], + body: [ + 'privileges' + ], + query: [ + 'refresh' + ] + }, + 'security.put_role': { + path: [ + 'name' + ], + body: [ + 'applications', + 'cluster', + 'global', + 'indices', + 'remote_indices', + 'remote_cluster', + 'metadata', + 'run_as', + 'description', + 'transient_metadata' + ], + query: [ + 'refresh' + ] + }, + 'security.put_role_mapping': { + path: [ + 'name' + ], + body: [ + 'enabled', + 'metadata', + 'roles', + 'role_templates', + 'rules', + 'run_as' + ], + query: [ + 'refresh' + ] + }, + 'security.put_user': { + path: [], + body: [ + 'username', + 'email', + 'full_name', + 'metadata', + 'password', + 'password_hash', + 'roles', + 'enabled' + ], + query: [ + 'refresh' + ] + }, + 'security.query_api_keys': { + path: [], + body: [ + 'aggregations', + 'aggs', + 'query', + 'from', + 'sort', + 'size', + 'search_after' + ], + query: [ + 'with_limited_by', + 'with_profile_uid', + 'typed_keys' + ] + }, + 'security.query_role': { + path: [], + body: [ + 'query', + 'from', + 'sort', + 'size', + 'search_after' + ], + query: [] + }, + 'security.query_user': { + path: [], + body: [ + 'query', + 'from', + 'sort', + 'size', + 'search_after' + ], + query: [ + 'with_profile_uid' + ] + }, + 'security.saml_authenticate': { + path: [], + body: [ + 'content', + 
'ids', + 'realm' + ], + query: [] + }, + 'security.saml_complete_logout': { + path: [], + body: [ + 'realm', + 'ids', + 'query_string', + 'content' + ], + query: [] + }, + 'security.saml_invalidate': { + path: [], + body: [ + 'acs', + 'query_string', + 'realm' + ], + query: [] + }, + 'security.saml_logout': { + path: [], + body: [ + 'token', + 'refresh_token' + ], + query: [] + }, + 'security.saml_prepare_authentication': { + path: [], + body: [ + 'acs', + 'realm', + 'relay_state' + ], + query: [] + }, + 'security.saml_service_provider_metadata': { + path: [ + 'realm_name' + ], + body: [], + query: [] + }, + 'security.suggest_user_profiles': { + path: [], + body: [ + 'name', + 'size', + 'data', + 'hint' + ], + query: [ + 'data' + ] + }, + 'security.update_api_key': { + path: [ + 'id' + ], + body: [ + 'role_descriptors', + 'metadata', + 'expiration' + ], + query: [] + }, + 'security.update_cross_cluster_api_key': { + path: [ + 'id' + ], + body: [ + 'access', + 'expiration', + 'metadata' + ], + query: [] + }, + 'security.update_settings': { + path: [], + body: [ + 'security', + 'security-profile', + 'security-tokens' + ], + query: [ + 'master_timeout', + 'timeout' + ] + }, + 'security.update_user_profile_data': { + path: [ + 'uid' + ], + body: [ + 'labels', + 'data' + ], + query: [ + 'if_seq_no', + 'if_primary_term', + 'refresh' + ] + } + } } /** * Activate a user profile. Create or update a user profile on behalf of another user. NOTE: The user profile feature is designed only for use by Kibana and Elastic's Observability, Enterprise Search, and Elastic Security solutions. Individual users and external applications should not call this API directly. The calling application must have either an `access_token` or a combination of `username` and `password` for the user that the profile document is intended for. Elastic reserves the right to change or remove this feature in future releases without prior notice. This API creates or updates a profile document for end users with information that is extracted from the user's authentication object including `username`, `full_name,` `roles`, and the authentication realm. For example, in the JWT `access_token` case, the profile user's `username` is extracted from the JWT token claim pointed to by the `claims.principal` setting of the JWT realm that authenticated the token. When updating a profile document, the API enables the document if it was disabled. Any updates do not change existing content for either the `labels` or `data` fields. 
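A hedged sketch of activating a user profile on behalf of another user, with placeholder credentials; as the note above says, this endpoint is intended for Kibana and Elastic solutions rather than general applications. The body fields are those listed under `security.activate_user_profile`.

```
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' }) // placeholder node

// Either an access token or a username/password pair identifies the
// profile's subject; `grant_type` selects which pair of fields is used.
const profile = await client.security.activateUserProfile({
  grant_type: 'password',
  username: 'jacknich',             // placeholder user
  password: 'l0ng-r4nd0m-p@ssw0rd'  // placeholder password
})
console.log(profile.uid)
```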
- * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-activate-user-profile | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-security-activate-user-profile | Elasticsearch API documentation} */ async activateUserProfile (this: That, params: T.SecurityActivateUserProfileRequest, options?: TransportRequestOptionsWithOutMeta): Promise async activateUserProfile (this: That, params: T.SecurityActivateUserProfileRequest, options?: TransportRequestOptionsWithMeta): Promise> async activateUserProfile (this: That, params: T.SecurityActivateUserProfileRequest, options?: TransportRequestOptions): Promise async activateUserProfile (this: That, params: T.SecurityActivateUserProfileRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const acceptedBody: string[] = ['access_token', 'grant_type', 'password', 'username'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['security.activate_user_profile'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -74,8 +700,14 @@ export default class Security { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -89,13 +721,16 @@ export default class Security { /** * Authenticate a user. Authenticates a user and returns information about the authenticated user. Include the user information in a [basic auth header](https://en.wikipedia.org/wiki/Basic_access_authentication). A successful call returns a JSON structure that shows user information such as their username, the roles that are assigned to the user, any assigned metadata, and information about the realms that authenticated and authorized the user. If the user cannot be authenticated, this API returns a 401 status code. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-authenticate | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-security-authenticate | Elasticsearch API documentation} */ async authenticate (this: That, params?: T.SecurityAuthenticateRequest, options?: TransportRequestOptionsWithOutMeta): Promise async authenticate (this: That, params?: T.SecurityAuthenticateRequest, options?: TransportRequestOptionsWithMeta): Promise> async authenticate (this: That, params?: T.SecurityAuthenticateRequest, options?: TransportRequestOptions): Promise async authenticate (this: That, params?: T.SecurityAuthenticateRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] + const { + path: acceptedPath + } = this.acceptedParams['security.authenticate'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -129,14 +764,18 @@ export default class Security { /** * Bulk delete roles. The role management APIs are generally the preferred way to manage roles, rather than using file-based role management. The bulk delete roles API cannot delete roles that are defined in roles files. 
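A hedged sketch of deleting several native-realm roles in one call; `names` is the only body parameter and `refresh` the only query parameter in the `security.bulk_delete_role` entry, and the role names here are placeholders.

```
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' }) // placeholder node

const result = await client.security.bulkDeleteRole({
  names: ['old-logs-reader', 'old-metrics-reader'], // placeholder role names
  refresh: 'wait_for'
})
// The response separates deleted roles from those that could not be deleted.
console.log(result.deleted, result.not_found, result.errors)
```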
- * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-bulk-delete-role | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-security-bulk-delete-role | Elasticsearch API documentation} */ async bulkDeleteRole (this: That, params: T.SecurityBulkDeleteRoleRequest, options?: TransportRequestOptionsWithOutMeta): Promise async bulkDeleteRole (this: That, params: T.SecurityBulkDeleteRoleRequest, options?: TransportRequestOptionsWithMeta): Promise> async bulkDeleteRole (this: That, params: T.SecurityBulkDeleteRoleRequest, options?: TransportRequestOptions): Promise async bulkDeleteRole (this: That, params: T.SecurityBulkDeleteRoleRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const acceptedBody: string[] = ['names'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['security.bulk_delete_role'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -158,8 +797,14 @@ export default class Security { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -173,14 +818,18 @@ export default class Security { /** * Bulk create or update roles. The role management APIs are generally the preferred way to manage roles, rather than using file-based role management. The bulk create or update roles API cannot update roles that are defined in roles files. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-bulk-put-role | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-security-bulk-put-role | Elasticsearch API documentation} */ async bulkPutRole (this: That, params: T.SecurityBulkPutRoleRequest, options?: TransportRequestOptionsWithOutMeta): Promise async bulkPutRole (this: That, params: T.SecurityBulkPutRoleRequest, options?: TransportRequestOptionsWithMeta): Promise> async bulkPutRole (this: That, params: T.SecurityBulkPutRoleRequest, options?: TransportRequestOptions): Promise async bulkPutRole (this: That, params: T.SecurityBulkPutRoleRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const acceptedBody: string[] = ['roles'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['security.bulk_put_role'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -202,8 +851,14 @@ export default class Security { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -217,14 +872,18 @@ export default class Security { /** * Bulk update API keys. Update the attributes for multiple API keys. 
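Before moving on to API keys, a hedged sketch of the bulk create-or-update roles call from the hunk above; the role descriptor shape (cluster and index privileges) follows the fields listed for `security.put_role`, and all names are placeholders.

```
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' }) // placeholder node

// `roles` is the single body parameter: a map of role name to descriptor.
await client.security.bulkPutRole({
  refresh: 'wait_for',
  roles: {
    'logs-reader': {
      cluster: ['monitor'],
      indices: [{ names: ['logs-*'], privileges: ['read', 'view_index_metadata'] }]
    }
  }
})
```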
IMPORTANT: It is not possible to use an API key as the authentication credential for this API. To update API keys, the owner user's credentials are required. This API is similar to the update API key API but enables you to apply the same update to multiple API keys in one API call. This operation can greatly improve performance over making individual updates. It is not possible to update expired or invalidated API keys. This API supports updates to API key access scope, metadata and expiration. The access scope of each API key is derived from the `role_descriptors` you specify in the request and a snapshot of the owner user's permissions at the time of the request. The snapshot of the owner's permissions is updated automatically on every call. IMPORTANT: If you don't specify `role_descriptors` in the request, a call to this API might still change an API key's access scope. This change can occur if the owner user's permissions have changed since the API key was created or last modified. A successful request returns a JSON structure that contains the IDs of all updated API keys, the IDs of API keys that already had the requested changes and did not require an update, and error details for any failed update. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-bulk-update-api-keys | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-security-bulk-update-api-keys | Elasticsearch API documentation} */ async bulkUpdateApiKeys (this: That, params: T.SecurityBulkUpdateApiKeysRequest, options?: TransportRequestOptionsWithOutMeta): Promise async bulkUpdateApiKeys (this: That, params: T.SecurityBulkUpdateApiKeysRequest, options?: TransportRequestOptionsWithMeta): Promise> async bulkUpdateApiKeys (this: That, params: T.SecurityBulkUpdateApiKeysRequest, options?: TransportRequestOptions): Promise async bulkUpdateApiKeys (this: That, params: T.SecurityBulkUpdateApiKeysRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const acceptedBody: string[] = ['expiration', 'ids', 'metadata', 'role_descriptors'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['security.bulk_update_api_keys'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -246,8 +905,14 @@ export default class Security { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -261,14 +926,18 @@ export default class Security { /** * Change passwords. Change the passwords of users in the native realm and built-in users. 
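A hedged sketch of a bulk API key update, authenticated as the owner user rather than with an API key, per the note above; the ids are placeholders and only `metadata` and `expiration` are changed here.

```
import { Client } from '@elastic/elasticsearch'

// Placeholder node and owner-user credentials; an API key cannot authenticate this call.
const client = new Client({
  node: 'http://localhost:9200/',
  auth: { username: 'api_key_owner', password: 'placeholder-password' }
})

const result = await client.security.bulkUpdateApiKeys({
  ids: ['VuaCfGcBCdbkQm-e5aOx', 'H3_AhoIBA9hmeQJdg7ij'], // placeholder API key ids
  expiration: '30d',
  metadata: { environment: 'production' }
})
console.log(result.updated, result.noops, result.errors)
```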
- * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-change-password | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-security-change-password | Elasticsearch API documentation} */ async changePassword (this: That, params?: T.SecurityChangePasswordRequest, options?: TransportRequestOptionsWithOutMeta): Promise async changePassword (this: That, params?: T.SecurityChangePasswordRequest, options?: TransportRequestOptionsWithMeta): Promise> async changePassword (this: That, params?: T.SecurityChangePasswordRequest, options?: TransportRequestOptions): Promise async changePassword (this: That, params?: T.SecurityChangePasswordRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['username'] - const acceptedBody: string[] = ['password', 'password_hash'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['security.change_password'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -291,8 +960,14 @@ export default class Security { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -316,13 +991,16 @@ export default class Security { /** * Clear the API key cache. Evict a subset of all entries from the API key cache. The cache is also automatically cleared on state changes of the security index. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-clear-api-key-cache | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-security-clear-api-key-cache | Elasticsearch API documentation} */ async clearApiKeyCache (this: That, params: T.SecurityClearApiKeyCacheRequest, options?: TransportRequestOptionsWithOutMeta): Promise async clearApiKeyCache (this: That, params: T.SecurityClearApiKeyCacheRequest, options?: TransportRequestOptionsWithMeta): Promise> async clearApiKeyCache (this: That, params: T.SecurityClearApiKeyCacheRequest, options?: TransportRequestOptions): Promise async clearApiKeyCache (this: That, params: T.SecurityClearApiKeyCacheRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['ids'] + const { + path: acceptedPath + } = this.acceptedParams['security.clear_api_key_cache'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -358,13 +1036,16 @@ export default class Security { /** * Clear the privileges cache. Evict privileges from the native application privilege cache. The cache is also automatically cleared for applications that have their privileges updated. 
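For the change password API in the hunk above, a hedged sketch: `username` is a path parameter (omit it to change the password of the authenticated user), `password` goes in the body, and `refresh` on the query string. The values are placeholders.

```
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' }) // placeholder node

await client.security.changePassword({
  username: 'jacknich',                  // placeholder user; optional
  password: 'n3w-l0ng-r4nd0m-p@ssw0rd',  // placeholder new password
  refresh: 'wait_for'
})
```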
- * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-clear-cached-privileges | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-security-clear-cached-privileges | Elasticsearch API documentation} */ async clearCachedPrivileges (this: That, params: T.SecurityClearCachedPrivilegesRequest, options?: TransportRequestOptionsWithOutMeta): Promise async clearCachedPrivileges (this: That, params: T.SecurityClearCachedPrivilegesRequest, options?: TransportRequestOptionsWithMeta): Promise> async clearCachedPrivileges (this: That, params: T.SecurityClearCachedPrivilegesRequest, options?: TransportRequestOptions): Promise async clearCachedPrivileges (this: That, params: T.SecurityClearCachedPrivilegesRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['application'] + const { + path: acceptedPath + } = this.acceptedParams['security.clear_cached_privileges'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -400,13 +1081,16 @@ export default class Security { /** * Clear the user cache. Evict users from the user cache. You can completely clear the cache or evict specific users. User credentials are cached in memory on each node to avoid connecting to a remote authentication service or hitting the disk for every incoming request. There are realm settings that you can use to configure the user cache. For more information, refer to the documentation about controlling the user cache. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-clear-cached-realms | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-security-clear-cached-realms | Elasticsearch API documentation} */ async clearCachedRealms (this: That, params: T.SecurityClearCachedRealmsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async clearCachedRealms (this: That, params: T.SecurityClearCachedRealmsRequest, options?: TransportRequestOptionsWithMeta): Promise> async clearCachedRealms (this: That, params: T.SecurityClearCachedRealmsRequest, options?: TransportRequestOptions): Promise async clearCachedRealms (this: That, params: T.SecurityClearCachedRealmsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['realms'] + const { + path: acceptedPath + } = this.acceptedParams['security.clear_cached_realms'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -442,13 +1126,16 @@ export default class Security { /** * Clear the roles cache. Evict roles from the native role cache. 
- * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-clear-cached-roles | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-security-clear-cached-roles | Elasticsearch API documentation} */ async clearCachedRoles (this: That, params: T.SecurityClearCachedRolesRequest, options?: TransportRequestOptionsWithOutMeta): Promise async clearCachedRoles (this: That, params: T.SecurityClearCachedRolesRequest, options?: TransportRequestOptionsWithMeta): Promise> async clearCachedRoles (this: That, params: T.SecurityClearCachedRolesRequest, options?: TransportRequestOptions): Promise async clearCachedRoles (this: That, params: T.SecurityClearCachedRolesRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] + const { + path: acceptedPath + } = this.acceptedParams['security.clear_cached_roles'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -484,13 +1171,16 @@ export default class Security { /** * Clear service account token caches. Evict a subset of all entries from the service account token caches. Two separate caches exist for service account tokens: one cache for tokens backed by the `service_tokens` file, and another for tokens backed by the `.security` index. This API clears matching entries from both caches. The cache for service account tokens backed by the `.security` index is cleared automatically on state changes of the security index. The cache for tokens backed by the `service_tokens` file is cleared automatically on file changes. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-clear-cached-service-tokens | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-security-clear-cached-service-tokens | Elasticsearch API documentation} */ async clearCachedServiceTokens (this: That, params: T.SecurityClearCachedServiceTokensRequest, options?: TransportRequestOptionsWithOutMeta): Promise async clearCachedServiceTokens (this: That, params: T.SecurityClearCachedServiceTokensRequest, options?: TransportRequestOptionsWithMeta): Promise> async clearCachedServiceTokens (this: That, params: T.SecurityClearCachedServiceTokensRequest, options?: TransportRequestOptions): Promise async clearCachedServiceTokens (this: That, params: T.SecurityClearCachedServiceTokensRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['namespace', 'service', 'name'] + const { + path: acceptedPath + } = this.acceptedParams['security.clear_cached_service_tokens'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -528,14 +1218,18 @@ export default class Security { /** * Create an API key. Create an API key for access without requiring basic authentication. IMPORTANT: If the credential that is used to authenticate this request is an API key, the derived API key cannot have any privileges. If you specify privileges, the API returns an error. A successful request returns a JSON structure that contains the API key, its unique id, and its name. If applicable, it also returns expiration information for the API key in milliseconds. NOTE: By default, API keys never expire. You can specify expiration information when you create the API keys. 
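A hedged sketch of creating a restricted API key as described here; the body fields match the `security.create_api_key` entry earlier in this file, and the role descriptor and metadata values are placeholders.

```
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' }) // placeholder node

const key = await client.security.createApiKey({
  name: 'ingest-service-key',  // placeholder name
  expiration: '7d',            // omit for a key that never expires
  role_descriptors: {
    'logs-writer': {
      indices: [{ names: ['logs-*'], privileges: ['create_doc', 'auto_configure'] }]
    }
  },
  metadata: { team: 'observability' },
  refresh: 'wait_for'
})
// `encoded` can be sent in an `Authorization: ApiKey ...` header.
console.log(key.id, key.api_key, key.encoded)
```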
The API keys are created by the Elasticsearch API key service, which is automatically enabled. To configure or turn off the API key service, refer to API key service setting documentation. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-create-api-key | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-security-create-api-key | Elasticsearch API documentation} */ async createApiKey (this: That, params?: T.SecurityCreateApiKeyRequest, options?: TransportRequestOptionsWithOutMeta): Promise async createApiKey (this: That, params?: T.SecurityCreateApiKeyRequest, options?: TransportRequestOptionsWithMeta): Promise> async createApiKey (this: That, params?: T.SecurityCreateApiKeyRequest, options?: TransportRequestOptions): Promise async createApiKey (this: That, params?: T.SecurityCreateApiKeyRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const acceptedBody: string[] = ['expiration', 'name', 'role_descriptors', 'metadata'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['security.create_api_key'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -558,8 +1252,14 @@ export default class Security { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -573,14 +1273,18 @@ export default class Security { /** * Create a cross-cluster API key. Create an API key of the `cross_cluster` type for the API key based remote cluster access. A `cross_cluster` API key cannot be used to authenticate through the REST interface. IMPORTANT: To authenticate this request you must use a credential that is not an API key. Even if you use an API key that has the required privilege, the API returns an error. Cross-cluster API keys are created by the Elasticsearch API key service, which is automatically enabled. NOTE: Unlike REST API keys, a cross-cluster API key does not capture permissions of the authenticated user. The API key’s effective permission is exactly as specified with the `access` property. A successful request returns a JSON structure that contains the API key, its unique ID, and its name. If applicable, it also returns expiration information for the API key in milliseconds. By default, API keys never expire. You can specify expiration information when you create the API keys. Cross-cluster API keys can only be updated with the update cross-cluster API key API. Attempting to update them with the update REST API key API or the bulk update REST API keys API will result in an error. 
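A hedged sketch of creating a cross-cluster API key for API key based remote cluster access, as described above; the `access` object with a `search` entry (and optionally `replication`) follows the documented request body, and the key name and index patterns are placeholders.

```
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' }) // placeholder node

const key = await client.security.createCrossClusterApiKey({
  name: 'remote-search-key', // placeholder name
  access: {
    search: [{ names: ['logs-*', 'metrics-*'] }]
  },
  expiration: '90d',
  metadata: { purpose: 'cross-cluster search from a placeholder remote cluster' }
})
console.log(key.id, key.encoded)
```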
- * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-create-cross-cluster-api-key | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-security-create-cross-cluster-api-key | Elasticsearch API documentation} */ async createCrossClusterApiKey (this: That, params: T.SecurityCreateCrossClusterApiKeyRequest, options?: TransportRequestOptionsWithOutMeta): Promise async createCrossClusterApiKey (this: That, params: T.SecurityCreateCrossClusterApiKeyRequest, options?: TransportRequestOptionsWithMeta): Promise> async createCrossClusterApiKey (this: That, params: T.SecurityCreateCrossClusterApiKeyRequest, options?: TransportRequestOptions): Promise async createCrossClusterApiKey (this: That, params: T.SecurityCreateCrossClusterApiKeyRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const acceptedBody: string[] = ['access', 'expiration', 'metadata', 'name'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['security.create_cross_cluster_api_key'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -602,8 +1306,14 @@ export default class Security { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -617,13 +1327,16 @@ export default class Security { /** * Create a service account token. Create a service accounts token for access without requiring basic authentication. NOTE: Service account tokens never expire. You must actively delete them if they are no longer needed. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-create-service-token | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-security-create-service-token | Elasticsearch API documentation} */ async createServiceToken (this: That, params: T.SecurityCreateServiceTokenRequest, options?: TransportRequestOptionsWithOutMeta): Promise async createServiceToken (this: That, params: T.SecurityCreateServiceTokenRequest, options?: TransportRequestOptionsWithMeta): Promise> async createServiceToken (this: That, params: T.SecurityCreateServiceTokenRequest, options?: TransportRequestOptions): Promise async createServiceToken (this: That, params: T.SecurityCreateServiceTokenRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['namespace', 'service', 'name'] + const { + path: acceptedPath + } = this.acceptedParams['security.create_service_token'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -668,14 +1381,18 @@ export default class Security { /** * Delegate PKI authentication. This API implements the exchange of an X509Certificate chain for an Elasticsearch access token. The certificate chain is validated, according to RFC 5280, by sequentially considering the trust configuration of every installed PKI realm that has `delegation.enabled` set to `true`. 
A successfully trusted client certificate is also subject to the validation of the subject distinguished name according to thw `username_pattern` of the respective realm. This API is called by smart and trusted proxies, such as Kibana, which terminate the user's TLS session but still want to authenticate the user by using a PKI realm—-as if the user connected directly to Elasticsearch. IMPORTANT: The association between the subject public key in the target certificate and the corresponding private key is not validated. This is part of the TLS authentication process and it is delegated to the proxy that calls this API. The proxy is trusted to have performed the TLS authentication and this API translates that authentication into an Elasticsearch access token. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-delegate-pki | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-security-delegate-pki | Elasticsearch API documentation} */ async delegatePki (this: That, params: T.SecurityDelegatePkiRequest, options?: TransportRequestOptionsWithOutMeta): Promise async delegatePki (this: That, params: T.SecurityDelegatePkiRequest, options?: TransportRequestOptionsWithMeta): Promise> async delegatePki (this: That, params: T.SecurityDelegatePkiRequest, options?: TransportRequestOptions): Promise async delegatePki (this: That, params: T.SecurityDelegatePkiRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const acceptedBody: string[] = ['x509_certificate_chain'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['security.delegate_pki'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -697,8 +1414,14 @@ export default class Security { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -712,13 +1435,16 @@ export default class Security { /** * Delete application privileges. To use this API, you must have one of the following privileges: * The `manage_security` cluster privilege (or a greater privilege such as `all`). * The "Manage Application Privileges" global privilege for the application being referenced in the request. 
- * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-delete-privileges | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-security-delete-privileges | Elasticsearch API documentation} */ async deletePrivileges (this: That, params: T.SecurityDeletePrivilegesRequest, options?: TransportRequestOptionsWithOutMeta): Promise async deletePrivileges (this: That, params: T.SecurityDeletePrivilegesRequest, options?: TransportRequestOptionsWithMeta): Promise> async deletePrivileges (this: That, params: T.SecurityDeletePrivilegesRequest, options?: TransportRequestOptions): Promise async deletePrivileges (this: That, params: T.SecurityDeletePrivilegesRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['application', 'name'] + const { + path: acceptedPath + } = this.acceptedParams['security.delete_privileges'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -755,13 +1481,16 @@ export default class Security { /** * Delete roles. Delete roles in the native realm. The role management APIs are generally the preferred way to manage roles, rather than using file-based role management. The delete roles API cannot remove roles that are defined in roles files. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-delete-role | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-security-delete-role | Elasticsearch API documentation} */ async deleteRole (this: That, params: T.SecurityDeleteRoleRequest, options?: TransportRequestOptionsWithOutMeta): Promise async deleteRole (this: That, params: T.SecurityDeleteRoleRequest, options?: TransportRequestOptionsWithMeta): Promise> async deleteRole (this: That, params: T.SecurityDeleteRoleRequest, options?: TransportRequestOptions): Promise async deleteRole (this: That, params: T.SecurityDeleteRoleRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] + const { + path: acceptedPath + } = this.acceptedParams['security.delete_role'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -797,13 +1526,16 @@ export default class Security { /** * Delete role mappings. Role mappings define which roles are assigned to each user. The role mapping APIs are generally the preferred way to manage role mappings rather than using role mapping files. The delete role mappings API cannot remove role mappings that are defined in role mapping files. 
- * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-delete-role-mapping | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-security-delete-role-mapping | Elasticsearch API documentation} */ async deleteRoleMapping (this: That, params: T.SecurityDeleteRoleMappingRequest, options?: TransportRequestOptionsWithOutMeta): Promise async deleteRoleMapping (this: That, params: T.SecurityDeleteRoleMappingRequest, options?: TransportRequestOptionsWithMeta): Promise> async deleteRoleMapping (this: That, params: T.SecurityDeleteRoleMappingRequest, options?: TransportRequestOptions): Promise async deleteRoleMapping (this: That, params: T.SecurityDeleteRoleMappingRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] + const { + path: acceptedPath + } = this.acceptedParams['security.delete_role_mapping'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -839,13 +1571,16 @@ export default class Security { /** * Delete service account tokens. Delete service account tokens for a service in a specified namespace. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-delete-service-token | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-security-delete-service-token | Elasticsearch API documentation} */ async deleteServiceToken (this: That, params: T.SecurityDeleteServiceTokenRequest, options?: TransportRequestOptionsWithOutMeta): Promise async deleteServiceToken (this: That, params: T.SecurityDeleteServiceTokenRequest, options?: TransportRequestOptionsWithMeta): Promise> async deleteServiceToken (this: That, params: T.SecurityDeleteServiceTokenRequest, options?: TransportRequestOptions): Promise async deleteServiceToken (this: That, params: T.SecurityDeleteServiceTokenRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['namespace', 'service', 'name'] + const { + path: acceptedPath + } = this.acceptedParams['security.delete_service_token'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -883,13 +1618,16 @@ export default class Security { /** * Delete users. Delete users from the native realm. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-delete-user | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-security-delete-user | Elasticsearch API documentation} */ async deleteUser (this: That, params: T.SecurityDeleteUserRequest, options?: TransportRequestOptionsWithOutMeta): Promise async deleteUser (this: That, params: T.SecurityDeleteUserRequest, options?: TransportRequestOptionsWithMeta): Promise> async deleteUser (this: That, params: T.SecurityDeleteUserRequest, options?: TransportRequestOptions): Promise async deleteUser (this: That, params: T.SecurityDeleteUserRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['username'] + const { + path: acceptedPath + } = this.acceptedParams['security.delete_user'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -925,13 +1663,16 @@ export default class Security { /** * Disable users. Disable users in the native realm. 
By default, when you create users, they are enabled. You can use this API to revoke a user's access to Elasticsearch. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-disable-user | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-security-disable-user | Elasticsearch API documentation} */ async disableUser (this: That, params: T.SecurityDisableUserRequest, options?: TransportRequestOptionsWithOutMeta): Promise async disableUser (this: That, params: T.SecurityDisableUserRequest, options?: TransportRequestOptionsWithMeta): Promise> async disableUser (this: That, params: T.SecurityDisableUserRequest, options?: TransportRequestOptions): Promise async disableUser (this: That, params: T.SecurityDisableUserRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['username'] + const { + path: acceptedPath + } = this.acceptedParams['security.disable_user'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -967,13 +1708,16 @@ export default class Security { /** * Disable a user profile. Disable user profiles so that they are not visible in user profile searches. NOTE: The user profile feature is designed only for use by Kibana and Elastic's Observability, Enterprise Search, and Elastic Security solutions. Individual users and external applications should not call this API directly. Elastic reserves the right to change or remove this feature in future releases without prior notice. When you activate a user profile, its automatically enabled and visible in user profile searches. You can use the disable user profile API to disable a user profile so it’s not visible in these searches. To re-enable a disabled user profile, use the enable user profile API . - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-disable-user-profile | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-security-disable-user-profile | Elasticsearch API documentation} */ async disableUserProfile (this: That, params: T.SecurityDisableUserProfileRequest, options?: TransportRequestOptionsWithOutMeta): Promise async disableUserProfile (this: That, params: T.SecurityDisableUserProfileRequest, options?: TransportRequestOptionsWithMeta): Promise> async disableUserProfile (this: That, params: T.SecurityDisableUserProfileRequest, options?: TransportRequestOptions): Promise async disableUserProfile (this: That, params: T.SecurityDisableUserProfileRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['uid'] + const { + path: acceptedPath + } = this.acceptedParams['security.disable_user_profile'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1009,13 +1753,16 @@ export default class Security { /** * Enable users. Enable users in the native realm. By default, when you create users, they are enabled. 
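For illustration only, not part of this patch: disabling and later re-enabling a native-realm user with the endpoints described above, reusing the `client` from the first sketch. The username is hypothetical.

// Revoke the user's access without deleting the user
await client.security.disableUser({ username: 'jdoe' })
// Restore access later
await client.security.enableUser({ username: 'jdoe' })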
- * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-enable-user | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-security-enable-user | Elasticsearch API documentation} */ async enableUser (this: That, params: T.SecurityEnableUserRequest, options?: TransportRequestOptionsWithOutMeta): Promise async enableUser (this: That, params: T.SecurityEnableUserRequest, options?: TransportRequestOptionsWithMeta): Promise> async enableUser (this: That, params: T.SecurityEnableUserRequest, options?: TransportRequestOptions): Promise async enableUser (this: That, params: T.SecurityEnableUserRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['username'] + const { + path: acceptedPath + } = this.acceptedParams['security.enable_user'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1051,13 +1798,16 @@ export default class Security { /** * Enable a user profile. Enable user profiles to make them visible in user profile searches. NOTE: The user profile feature is designed only for use by Kibana and Elastic's Observability, Enterprise Search, and Elastic Security solutions. Individual users and external applications should not call this API directly. Elastic reserves the right to change or remove this feature in future releases without prior notice. When you activate a user profile, it's automatically enabled and visible in user profile searches. If you later disable the user profile, you can use the enable user profile API to make the profile visible in these searches again. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-enable-user-profile | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-security-enable-user-profile | Elasticsearch API documentation} */ async enableUserProfile (this: That, params: T.SecurityEnableUserProfileRequest, options?: TransportRequestOptionsWithOutMeta): Promise async enableUserProfile (this: That, params: T.SecurityEnableUserProfileRequest, options?: TransportRequestOptionsWithMeta): Promise> async enableUserProfile (this: That, params: T.SecurityEnableUserProfileRequest, options?: TransportRequestOptions): Promise async enableUserProfile (this: That, params: T.SecurityEnableUserProfileRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['uid'] + const { + path: acceptedPath + } = this.acceptedParams['security.enable_user_profile'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1093,13 +1843,16 @@ export default class Security { /** * Enroll Kibana. Enable a Kibana instance to configure itself for communication with a secured Elasticsearch cluster. NOTE: This API is currently intended for internal use only by Kibana. Kibana uses this API internally to configure itself for communications with an Elasticsearch cluster that already has security features enabled. 
- * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-enroll-kibana | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-security-enroll-kibana | Elasticsearch API documentation} */ async enrollKibana (this: That, params?: T.SecurityEnrollKibanaRequest, options?: TransportRequestOptionsWithOutMeta): Promise async enrollKibana (this: That, params?: T.SecurityEnrollKibanaRequest, options?: TransportRequestOptionsWithMeta): Promise> async enrollKibana (this: That, params?: T.SecurityEnrollKibanaRequest, options?: TransportRequestOptions): Promise async enrollKibana (this: That, params?: T.SecurityEnrollKibanaRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] + const { + path: acceptedPath + } = this.acceptedParams['security.enroll_kibana'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1133,13 +1886,16 @@ export default class Security { /** * Enroll a node. Enroll a new node to allow it to join an existing cluster with security features enabled. The response contains all the necessary information for the joining node to bootstrap discovery and security related settings so that it can successfully join the cluster. The response contains key and certificate material that allows the caller to generate valid signed certificates for the HTTP layer of all nodes in the cluster. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-enroll-node | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-security-enroll-node | Elasticsearch API documentation} */ async enrollNode (this: That, params?: T.SecurityEnrollNodeRequest, options?: TransportRequestOptionsWithOutMeta): Promise async enrollNode (this: That, params?: T.SecurityEnrollNodeRequest, options?: TransportRequestOptionsWithMeta): Promise> async enrollNode (this: That, params?: T.SecurityEnrollNodeRequest, options?: TransportRequestOptions): Promise async enrollNode (this: That, params?: T.SecurityEnrollNodeRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] + const { + path: acceptedPath + } = this.acceptedParams['security.enroll_node'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1173,13 +1929,16 @@ export default class Security { /** * Get API key information. Retrieves information for one or more API keys. NOTE: If you have only the `manage_own_api_key` privilege, this API returns only the API keys that you own. If you have `read_security`, `manage_api_key` or greater privileges (including `manage_security`), this API returns all API keys regardless of ownership. 
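A hedged sketch of the get-API-key endpoint described above, not part of this patch. It reuses the `client` from the first sketch and assumes the caller holds `manage_own_api_key`, so `owner: true` limits the result to the caller's own keys.

const keys = await client.security.getApiKey({ owner: true })
for (const key of keys.api_keys) {
  console.log(key.id, key.name, key.invalidated)
}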
- * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-get-api-key | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-security-get-api-key | Elasticsearch API documentation} */ async getApiKey (this: That, params?: T.SecurityGetApiKeyRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getApiKey (this: That, params?: T.SecurityGetApiKeyRequest, options?: TransportRequestOptionsWithMeta): Promise> async getApiKey (this: That, params?: T.SecurityGetApiKeyRequest, options?: TransportRequestOptions): Promise async getApiKey (this: That, params?: T.SecurityGetApiKeyRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] + const { + path: acceptedPath + } = this.acceptedParams['security.get_api_key'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1213,13 +1972,16 @@ export default class Security { /** * Get builtin privileges. Get the list of cluster privileges and index privileges that are available in this version of Elasticsearch. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-get-builtin-privileges | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-security-get-builtin-privileges | Elasticsearch API documentation} */ async getBuiltinPrivileges (this: That, params?: T.SecurityGetBuiltinPrivilegesRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getBuiltinPrivileges (this: That, params?: T.SecurityGetBuiltinPrivilegesRequest, options?: TransportRequestOptionsWithMeta): Promise> async getBuiltinPrivileges (this: That, params?: T.SecurityGetBuiltinPrivilegesRequest, options?: TransportRequestOptions): Promise async getBuiltinPrivileges (this: That, params?: T.SecurityGetBuiltinPrivilegesRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] + const { + path: acceptedPath + } = this.acceptedParams['security.get_builtin_privileges'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1253,13 +2015,16 @@ export default class Security { /** * Get application privileges. To use this API, you must have one of the following privileges: * The `read_security` cluster privilege (or a greater privilege such as `manage_security` or `all`). * The "Manage Application Privileges" global privilege for the application being referenced in the request. 
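Illustrative only, not part of this patch: fetching the application privileges described above for a single application, reusing the earlier `client`. The application name is hypothetical.

const appPrivileges = await client.security.getPrivileges({ application: 'myapp' })
console.log(JSON.stringify(appPrivileges, null, 2))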
- * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-get-privileges | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-security-get-privileges | Elasticsearch API documentation} */ async getPrivileges (this: That, params?: T.SecurityGetPrivilegesRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getPrivileges (this: That, params?: T.SecurityGetPrivilegesRequest, options?: TransportRequestOptionsWithMeta): Promise> async getPrivileges (this: That, params?: T.SecurityGetPrivilegesRequest, options?: TransportRequestOptions): Promise async getPrivileges (this: That, params?: T.SecurityGetPrivilegesRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['application', 'name'] + const { + path: acceptedPath + } = this.acceptedParams['security.get_privileges'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1307,13 +2072,16 @@ export default class Security { /** * Get roles. Get roles in the native realm. The role management APIs are generally the preferred way to manage roles, rather than using file-based role management. The get roles API cannot retrieve roles that are defined in roles files. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-get-role | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-security-get-role | Elasticsearch API documentation} */ async getRole (this: That, params?: T.SecurityGetRoleRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getRole (this: That, params?: T.SecurityGetRoleRequest, options?: TransportRequestOptionsWithMeta): Promise> async getRole (this: That, params?: T.SecurityGetRoleRequest, options?: TransportRequestOptions): Promise async getRole (this: That, params?: T.SecurityGetRoleRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] + const { + path: acceptedPath + } = this.acceptedParams['security.get_role'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1357,13 +2125,16 @@ export default class Security { /** * Get role mappings. Role mappings define which roles are assigned to each user. The role mapping APIs are generally the preferred way to manage role mappings rather than using role mapping files. The get role mappings API cannot retrieve role mappings that are defined in role mapping files. 
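A small sketch, not part of this patch, of the get-role and get-role-mapping endpoints described above; both responses are objects keyed by name. The role and mapping names are hypothetical and `client` comes from the first sketch.

const roles = await client.security.getRole({ name: 'my_admin_role' })
const mappings = await client.security.getRoleMapping({ name: 'ldap-admins' })
console.log(Object.keys(roles), Object.keys(mappings))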
- * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-get-role-mapping | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-security-get-role-mapping | Elasticsearch API documentation} */ async getRoleMapping (this: That, params?: T.SecurityGetRoleMappingRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getRoleMapping (this: That, params?: T.SecurityGetRoleMappingRequest, options?: TransportRequestOptionsWithMeta): Promise> async getRoleMapping (this: That, params?: T.SecurityGetRoleMappingRequest, options?: TransportRequestOptions): Promise async getRoleMapping (this: That, params?: T.SecurityGetRoleMappingRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] + const { + path: acceptedPath + } = this.acceptedParams['security.get_role_mapping'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1407,13 +2178,16 @@ export default class Security { /** * Get service accounts. Get a list of service accounts that match the provided path parameters. NOTE: Currently, only the `elastic/fleet-server` service account is available. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-get-service-accounts | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-security-get-service-accounts | Elasticsearch API documentation} */ async getServiceAccounts (this: That, params?: T.SecurityGetServiceAccountsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getServiceAccounts (this: That, params?: T.SecurityGetServiceAccountsRequest, options?: TransportRequestOptionsWithMeta): Promise> async getServiceAccounts (this: That, params?: T.SecurityGetServiceAccountsRequest, options?: TransportRequestOptions): Promise async getServiceAccounts (this: That, params?: T.SecurityGetServiceAccountsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['namespace', 'service'] + const { + path: acceptedPath + } = this.acceptedParams['security.get_service_accounts'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1461,13 +2235,16 @@ export default class Security { /** * Get service account credentials. To use this API, you must have at least the `read_security` cluster privilege (or a greater privilege such as `manage_service_account` or `manage_security`). The response includes service account tokens that were created with the create service account tokens API as well as file-backed tokens from all nodes of the cluster. NOTE: For tokens backed by the `service_tokens` file, the API collects them from all nodes of the cluster. Tokens with the same name from different nodes are assumed to be the same token and are only counted once towards the total number of service tokens. 
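For illustration, not part of this patch: listing credentials for the `elastic/fleet-server` service account mentioned above, reusing the earlier `client`.

const creds = await client.security.getServiceCredentials({
  namespace: 'elastic',
  service: 'fleet-server'
})
console.log(creds.count, Object.keys(creds.tokens))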
- * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-get-service-credentials | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-security-get-service-credentials | Elasticsearch API documentation} */ async getServiceCredentials (this: That, params: T.SecurityGetServiceCredentialsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getServiceCredentials (this: That, params: T.SecurityGetServiceCredentialsRequest, options?: TransportRequestOptionsWithMeta): Promise> async getServiceCredentials (this: That, params: T.SecurityGetServiceCredentialsRequest, options?: TransportRequestOptions): Promise async getServiceCredentials (this: That, params: T.SecurityGetServiceCredentialsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['namespace', 'service'] + const { + path: acceptedPath + } = this.acceptedParams['security.get_service_credentials'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1504,13 +2281,16 @@ export default class Security { /** * Get security index settings. Get the user-configurable settings for the security internal index (`.security` and associated indices). Only a subset of the index settings — those that are user-configurable—will be shown. This includes: * `index.auto_expand_replicas` * `index.number_of_replicas` - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-get-settings | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-security-get-settings | Elasticsearch API documentation} */ async getSettings (this: That, params?: T.SecurityGetSettingsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getSettings (this: That, params?: T.SecurityGetSettingsRequest, options?: TransportRequestOptionsWithMeta): Promise> async getSettings (this: That, params?: T.SecurityGetSettingsRequest, options?: TransportRequestOptions): Promise async getSettings (this: That, params?: T.SecurityGetSettingsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] + const { + path: acceptedPath + } = this.acceptedParams['security.get_settings'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1544,14 +2324,18 @@ export default class Security { /** * Get a token. Create a bearer token for access without requiring basic authentication. The tokens are created by the Elasticsearch Token Service, which is automatically enabled when you configure TLS on the HTTP interface. Alternatively, you can explicitly enable the `xpack.security.authc.token.enabled` setting. When you are running in production mode, a bootstrap check prevents you from enabling the token service unless you also enable TLS on the HTTP interface. The get token API takes the same parameters as a typical OAuth 2.0 token API except for the use of a JSON request body. A successful get token API call returns a JSON structure that contains the access token, the amount of time (seconds) that the token expires in, the type, and the scope if available. The tokens returned by the get token API have a finite period of time for which they are valid and after that time period, they can no longer be used. That time period is defined by the `xpack.security.authc.token.timeout` setting. 
If you want to invalidate a token immediately, you can do so by using the invalidate token API. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-get-token | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-security-get-token | Elasticsearch API documentation} */ async getToken (this: That, params?: T.SecurityGetTokenRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getToken (this: That, params?: T.SecurityGetTokenRequest, options?: TransportRequestOptionsWithMeta): Promise> async getToken (this: That, params?: T.SecurityGetTokenRequest, options?: TransportRequestOptions): Promise async getToken (this: That, params?: T.SecurityGetTokenRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const acceptedBody: string[] = ['grant_type', 'scope', 'password', 'kerberos_ticket', 'refresh_token', 'username'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['security.get_token'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1574,8 +2358,14 @@ export default class Security { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -1589,13 +2379,16 @@ export default class Security { /** * Get users. Get information about users in the native realm and built-in users. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-get-user | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-security-get-user | Elasticsearch API documentation} */ async getUser (this: That, params?: T.SecurityGetUserRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getUser (this: That, params?: T.SecurityGetUserRequest, options?: TransportRequestOptionsWithMeta): Promise> async getUser (this: That, params?: T.SecurityGetUserRequest, options?: TransportRequestOptions): Promise async getUser (this: That, params?: T.SecurityGetUserRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['username'] + const { + path: acceptedPath + } = this.acceptedParams['security.get_user'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1639,13 +2432,16 @@ export default class Security { /** * Get user privileges. Get the security privileges for the logged in user. All users can use this API, but only to determine their own privileges. To check the privileges of other users, you must use the run as feature. To check whether a user has a specific list of privileges, use the has privileges API. 
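An illustrative password-grant call to the get-token endpoint whose handler appears above, not part of this patch. The credentials are hypothetical and `client` is the instance from the first sketch.

const token = await client.security.getToken({
  grant_type: 'password',
  username: 'jdoe',
  password: 'changeme'
})
console.log(token.access_token, token.expires_in)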
- * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-get-user-privileges | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-security-get-user-privileges | Elasticsearch API documentation} */ async getUserPrivileges (this: That, params?: T.SecurityGetUserPrivilegesRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getUserPrivileges (this: That, params?: T.SecurityGetUserPrivilegesRequest, options?: TransportRequestOptionsWithMeta): Promise> async getUserPrivileges (this: That, params?: T.SecurityGetUserPrivilegesRequest, options?: TransportRequestOptions): Promise async getUserPrivileges (this: That, params?: T.SecurityGetUserPrivilegesRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] + const { + path: acceptedPath + } = this.acceptedParams['security.get_user_privileges'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1679,13 +2475,16 @@ export default class Security { /** * Get a user profile. Get a user's profile using the unique profile ID. NOTE: The user profile feature is designed only for use by Kibana and Elastic's Observability, Enterprise Search, and Elastic Security solutions. Individual users and external applications should not call this API directly. Elastic reserves the right to change or remove this feature in future releases without prior notice. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-get-user-profile | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-security-get-user-profile | Elasticsearch API documentation} */ async getUserProfile (this: That, params: T.SecurityGetUserProfileRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getUserProfile (this: That, params: T.SecurityGetUserProfileRequest, options?: TransportRequestOptionsWithMeta): Promise> async getUserProfile (this: That, params: T.SecurityGetUserProfileRequest, options?: TransportRequestOptions): Promise async getUserProfile (this: That, params: T.SecurityGetUserProfileRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['uid'] + const { + path: acceptedPath + } = this.acceptedParams['security.get_user_profile'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1721,14 +2520,18 @@ export default class Security { /** * Grant an API key. Create an API key on behalf of another user. This API is similar to the create API keys API, however it creates the API key for a user that is different than the user that runs the API. The caller must have authentication credentials for the user on whose behalf the API key will be created. It is not possible to use this API to create an API key without that user's credentials. The supported user authentication credential types are: * username and password * Elasticsearch access tokens * JWTs The user, for whom the authentication credentials is provided, can optionally "run as" (impersonate) another user. In this case, the API key will be created on behalf of the impersonated user. This API is intended be used by applications that need to create and manage API keys for end users, but cannot guarantee that those users have permission to create API keys on their own behalf. 
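A hedged sketch of the grant-API-key endpoint described above, not part of this patch: the caller (for example Kibana) holds the end user's credentials and creates a key on that user's behalf. The username, password, and key name are hypothetical; `client` comes from the first sketch.

const granted = await client.security.grantApiKey({
  grant_type: 'password',
  username: 'jdoe',
  password: 'changeme',
  api_key: { name: 'jdoe-key' }
})
console.log(granted.id, granted.api_key)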
The API keys are created by the Elasticsearch API key service, which is automatically enabled. A successful grant API key API call returns a JSON structure that contains the API key, its unique id, and its name. If applicable, it also returns expiration information for the API key in milliseconds. By default, API keys never expire. You can specify expiration information when you create the API keys. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-grant-api-key | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-security-grant-api-key | Elasticsearch API documentation} */ async grantApiKey (this: That, params: T.SecurityGrantApiKeyRequest, options?: TransportRequestOptionsWithOutMeta): Promise async grantApiKey (this: That, params: T.SecurityGrantApiKeyRequest, options?: TransportRequestOptionsWithMeta): Promise> async grantApiKey (this: That, params: T.SecurityGrantApiKeyRequest, options?: TransportRequestOptions): Promise async grantApiKey (this: That, params: T.SecurityGrantApiKeyRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const acceptedBody: string[] = ['api_key', 'grant_type', 'access_token', 'username', 'password', 'run_as'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['security.grant_api_key'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1750,8 +2553,14 @@ export default class Security { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -1765,14 +2574,18 @@ export default class Security { /** * Check user privileges. Determine whether the specified user has a specified list of privileges. All users can use this API, but only to determine their own privileges. To check the privileges of other users, you must use the run as feature. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-has-privileges | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-security-has-privileges | Elasticsearch API documentation} */ async hasPrivileges (this: That, params?: T.SecurityHasPrivilegesRequest, options?: TransportRequestOptionsWithOutMeta): Promise async hasPrivileges (this: That, params?: T.SecurityHasPrivilegesRequest, options?: TransportRequestOptionsWithMeta): Promise> async hasPrivileges (this: That, params?: T.SecurityHasPrivilegesRequest, options?: TransportRequestOptions): Promise async hasPrivileges (this: That, params?: T.SecurityHasPrivilegesRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['user'] - const acceptedBody: string[] = ['application', 'cluster', 'index'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['security.has_privileges'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -1795,8 +2608,14 @@ export default class Security { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -1820,14 +2639,18 @@ export default class Security { /** * Check user profile privileges. Determine whether the users associated with the specified user profile IDs have all the requested privileges. NOTE: The user profile feature is designed only for use by Kibana and Elastic's Observability, Enterprise Search, and Elastic Security solutions. Individual users and external applications should not call this API directly. Elastic reserves the right to change or remove this feature in future releases without prior notice. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-has-privileges-user-profile | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-security-has-privileges-user-profile | Elasticsearch API documentation} */ async hasPrivilegesUserProfile (this: That, params: T.SecurityHasPrivilegesUserProfileRequest, options?: TransportRequestOptionsWithOutMeta): Promise async hasPrivilegesUserProfile (this: That, params: T.SecurityHasPrivilegesUserProfileRequest, options?: TransportRequestOptionsWithMeta): Promise> async hasPrivilegesUserProfile (this: That, params: T.SecurityHasPrivilegesUserProfileRequest, options?: TransportRequestOptions): Promise async hasPrivilegesUserProfile (this: That, params: T.SecurityHasPrivilegesUserProfileRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const acceptedBody: string[] = ['uids', 'privileges'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['security.has_privileges_user_profile'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1849,8 +2672,14 @@ export default class Security { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -1864,14 +2693,18 @@ export default class Security { /** * Invalidate API keys. This API invalidates API keys created by the create API key or grant API key APIs. Invalidated API keys fail authentication, but they can still be viewed using the get API key information and query API key information APIs, for at least the configured retention period, until they are automatically deleted. To use this API, you must have at least the `manage_security`, `manage_api_key`, or `manage_own_api_key` cluster privileges. The `manage_security` privilege allows deleting any API key, including both REST and cross cluster API keys. The `manage_api_key` privilege allows deleting any REST API key, but not cross cluster API keys. The `manage_own_api_key` only allows deleting REST API keys that are owned by the user. 
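Illustrative only, not part of this patch: checking the calling user's own privileges with the has-privileges endpoint shown above, reusing the earlier `client`. The index pattern is hypothetical.

const check = await client.security.hasPrivileges({
  cluster: ['monitor'],
  index: [{ names: ['logs-*'], privileges: ['read'] }]
})
console.log(check.has_all_requested)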
In addition, with the `manage_own_api_key` privilege, an invalidation request must be issued in one of the three formats: - Set the parameter `owner=true`. - Or, set both `username` and `realm_name` to match the user's identity. - Or, if the request is issued by an API key, that is to say an API key invalidates itself, specify its ID in the `ids` field. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-invalidate-api-key | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-security-invalidate-api-key | Elasticsearch API documentation} */ async invalidateApiKey (this: That, params?: T.SecurityInvalidateApiKeyRequest, options?: TransportRequestOptionsWithOutMeta): Promise async invalidateApiKey (this: That, params?: T.SecurityInvalidateApiKeyRequest, options?: TransportRequestOptionsWithMeta): Promise> async invalidateApiKey (this: That, params?: T.SecurityInvalidateApiKeyRequest, options?: TransportRequestOptions): Promise async invalidateApiKey (this: That, params?: T.SecurityInvalidateApiKeyRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const acceptedBody: string[] = ['id', 'ids', 'name', 'owner', 'realm_name', 'username'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['security.invalidate_api_key'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1894,8 +2727,14 @@ export default class Security { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -1909,14 +2748,18 @@ export default class Security { /** * Invalidate a token. The access tokens returned by the get token API have a finite period of time for which they are valid. After that time period, they can no longer be used. The time period is defined by the `xpack.security.authc.token.timeout` setting. The refresh tokens returned by the get token API are only valid for 24 hours. They can also be used exactly once. If you want to invalidate one or more access or refresh tokens immediately, use this invalidate token API. NOTE: While all parameters are optional, at least one of them is required. More specifically, either one of `token` or `refresh_token` parameters is required. If none of these two are specified, then `realm_name` and/or `username` need to be specified. 
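Two illustrative invalidation calls matching the descriptions above, not part of this patch and reusing the earlier `client`: the first uses the `owner=true` form permitted with `manage_own_api_key`, the second invalidates all tokens issued for a hypothetical user.

await client.security.invalidateApiKey({ owner: true })
await client.security.invalidateToken({ username: 'jdoe' })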
- * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-invalidate-token | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-security-invalidate-token | Elasticsearch API documentation} */ async invalidateToken (this: That, params?: T.SecurityInvalidateTokenRequest, options?: TransportRequestOptionsWithOutMeta): Promise async invalidateToken (this: That, params?: T.SecurityInvalidateTokenRequest, options?: TransportRequestOptionsWithMeta): Promise> async invalidateToken (this: That, params?: T.SecurityInvalidateTokenRequest, options?: TransportRequestOptions): Promise async invalidateToken (this: That, params?: T.SecurityInvalidateTokenRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const acceptedBody: string[] = ['token', 'refresh_token', 'realm_name', 'username'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['security.invalidate_token'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -1939,8 +2782,14 @@ export default class Security { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -1954,14 +2803,18 @@ export default class Security { /** * Authenticate OpenID Connect. Exchange an OpenID Connect authentication response message for an Elasticsearch internal access token and refresh token that can be subsequently used for authentication. Elasticsearch exposes all the necessary OpenID Connect related functionality with the OpenID Connect APIs. These APIs are used internally by Kibana in order to provide OpenID Connect based authentication, but can also be used by other, custom web applications or other clients. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-oidc-authenticate | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-security-oidc-authenticate | Elasticsearch API documentation} */ async oidcAuthenticate (this: That, params: T.SecurityOidcAuthenticateRequest, options?: TransportRequestOptionsWithOutMeta): Promise async oidcAuthenticate (this: That, params: T.SecurityOidcAuthenticateRequest, options?: TransportRequestOptionsWithMeta): Promise> async oidcAuthenticate (this: That, params: T.SecurityOidcAuthenticateRequest, options?: TransportRequestOptions): Promise async oidcAuthenticate (this: That, params: T.SecurityOidcAuthenticateRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const acceptedBody: string[] = ['nonce', 'realm', 'redirect_uri', 'state'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['security.oidc_authenticate'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -1983,8 +2836,14 @@ export default class Security { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -1998,14 +2857,18 @@ export default class Security { /** * Logout of OpenID Connect. Invalidate an access token and a refresh token that were generated as a response to the `/_security/oidc/authenticate` API. If the OpenID Connect authentication realm in Elasticsearch is accordingly configured, the response to this call will contain a URI pointing to the end session endpoint of the OpenID Connect Provider in order to perform single logout. Elasticsearch exposes all the necessary OpenID Connect related functionality with the OpenID Connect APIs. These APIs are used internally by Kibana in order to provide OpenID Connect based authentication, but can also be used by other, custom web applications or other clients. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-oidc-logout | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-security-oidc-logout | Elasticsearch API documentation} */ async oidcLogout (this: That, params: T.SecurityOidcLogoutRequest, options?: TransportRequestOptionsWithOutMeta): Promise async oidcLogout (this: That, params: T.SecurityOidcLogoutRequest, options?: TransportRequestOptionsWithMeta): Promise> async oidcLogout (this: That, params: T.SecurityOidcLogoutRequest, options?: TransportRequestOptions): Promise async oidcLogout (this: That, params: T.SecurityOidcLogoutRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const acceptedBody: string[] = ['access_token', 'refresh_token'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['security.oidc_logout'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2027,8 +2890,14 @@ export default class Security { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -2042,14 +2911,18 @@ export default class Security { /** * Prepare OpenID connect authentication. Create an oAuth 2.0 authentication request as a URL string based on the configuration of the OpenID Connect authentication realm in Elasticsearch. The response of this API is a URL pointing to the Authorization Endpoint of the configured OpenID Connect Provider, which can be used to redirect the browser of the user in order to continue the authentication process. Elasticsearch exposes all the necessary OpenID Connect related functionality with the OpenID Connect APIs. These APIs are used internally by Kibana in order to provide OpenID Connect based authentication, but can also be used by other, custom web applications or other clients. 
- * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-oidc-prepare-authentication | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-security-oidc-prepare-authentication | Elasticsearch API documentation} */ async oidcPrepareAuthentication (this: That, params?: T.SecurityOidcPrepareAuthenticationRequest, options?: TransportRequestOptionsWithOutMeta): Promise async oidcPrepareAuthentication (this: That, params?: T.SecurityOidcPrepareAuthenticationRequest, options?: TransportRequestOptionsWithMeta): Promise> async oidcPrepareAuthentication (this: That, params?: T.SecurityOidcPrepareAuthenticationRequest, options?: TransportRequestOptions): Promise async oidcPrepareAuthentication (this: That, params?: T.SecurityOidcPrepareAuthenticationRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const acceptedBody: string[] = ['iss', 'login_hint', 'nonce', 'realm', 'state'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['security.oidc_prepare_authentication'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2072,8 +2945,14 @@ export default class Security { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -2087,14 +2966,18 @@ export default class Security { /** * Create or update application privileges. To use this API, you must have one of the following privileges: * The `manage_security` cluster privilege (or a greater privilege such as `all`). * The "Manage Application Privileges" global privilege for the application being referenced in the request. Application names are formed from a prefix, with an optional suffix that conform to the following rules: * The prefix must begin with a lowercase ASCII letter. * The prefix must contain only ASCII letters or digits. * The prefix must be at least 3 characters long. * If the suffix exists, it must begin with either a dash `-` or `_`. * The suffix cannot contain any of the following characters: `\`, `/`, `*`, `?`, `"`, `<`, `>`, `|`, `,`, `*`. * No part of the name can contain whitespace. Privilege names must begin with a lowercase ASCII letter and must contain only ASCII letters and digits along with the characters `_`, `-`, and `.`. Action names can contain any number of printable ASCII characters and must contain at least one of the following characters: `/`, `*`, `:`. 
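A minimal sketch of the create-or-update-application-privileges endpoint described above, not part of this patch. The application name, privilege name, and actions are hypothetical but follow the naming rules listed above; `client` is the instance from the first sketch.

await client.security.putPrivileges({
  privileges: {
    myapp: {
      read: { actions: ['data:read/*', 'action:login'] }
    }
  }
})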
- * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-put-privileges | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-security-put-privileges | Elasticsearch API documentation} */ async putPrivileges (this: That, params: T.SecurityPutPrivilegesRequest, options?: TransportRequestOptionsWithOutMeta): Promise async putPrivileges (this: That, params: T.SecurityPutPrivilegesRequest, options?: TransportRequestOptionsWithMeta): Promise> async putPrivileges (this: That, params: T.SecurityPutPrivilegesRequest, options?: TransportRequestOptions): Promise async putPrivileges (this: That, params: T.SecurityPutPrivilegesRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const acceptedBody: string[] = ['privileges'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['security.put_privileges'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2106,8 +2989,14 @@ export default class Security { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -2121,14 +3010,18 @@ export default class Security { /** * Create or update roles. The role management APIs are generally the preferred way to manage roles in the native realm, rather than using file-based role management. The create or update roles API cannot update roles that are defined in roles files. File-based role management is not available in Elastic Serverless. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-put-role | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-security-put-role | Elasticsearch API documentation} */ async putRole (this: That, params: T.SecurityPutRoleRequest, options?: TransportRequestOptionsWithOutMeta): Promise async putRole (this: That, params: T.SecurityPutRoleRequest, options?: TransportRequestOptionsWithMeta): Promise> async putRole (this: That, params: T.SecurityPutRoleRequest, options?: TransportRequestOptions): Promise async putRole (this: That, params: T.SecurityPutRoleRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] - const acceptedBody: string[] = ['applications', 'cluster', 'global', 'indices', 'remote_indices', 'remote_cluster', 'metadata', 'run_as', 'description', 'transient_metadata'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['security.put_role'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2150,8 +3043,14 @@ export default class Security { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? 
{} + // @ts-expect-error + body[key] = params[key] + } } } @@ -2168,14 +3067,18 @@ export default class Security { /** * Create or update role mappings. Role mappings define which roles are assigned to each user. Each mapping has rules that identify users and a list of roles that are granted to those users. The role mapping APIs are generally the preferred way to manage role mappings rather than using role mapping files. The create or update role mappings API cannot update role mappings that are defined in role mapping files. NOTE: This API does not create roles. Rather, it maps users to existing roles. Roles can be created by using the create or update roles API or roles files. **Role templates** The most common use for role mappings is to create a mapping from a known value on the user to a fixed role name. For example, all users in the `cn=admin,dc=example,dc=com` LDAP group should be given the superuser role in Elasticsearch. The `roles` field is used for this purpose. For more complex needs, it is possible to use Mustache templates to dynamically determine the names of the roles that should be granted to the user. The `role_templates` field is used for this purpose. NOTE: To use role templates successfully, the relevant scripting feature must be enabled. Otherwise, all attempts to create a role mapping with role templates fail. All of the user fields that are available in the role mapping rules are also available in the role templates. Thus it is possible to assign a user to a role that reflects their username, their groups, or the name of the realm to which they authenticated. By default a template is evaluated to produce a single string that is the name of the role which should be assigned to the user. If the format of the template is set to "json" then the template is expected to produce a JSON string or an array of JSON strings for the role names. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-put-role-mapping | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-security-put-role-mapping | Elasticsearch API documentation} */ async putRoleMapping (this: That, params: T.SecurityPutRoleMappingRequest, options?: TransportRequestOptionsWithOutMeta): Promise async putRoleMapping (this: That, params: T.SecurityPutRoleMappingRequest, options?: TransportRequestOptionsWithMeta): Promise> async putRoleMapping (this: That, params: T.SecurityPutRoleMappingRequest, options?: TransportRequestOptions): Promise async putRoleMapping (this: That, params: T.SecurityPutRoleMappingRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] - const acceptedBody: string[] = ['enabled', 'metadata', 'roles', 'role_templates', 'rules', 'run_as'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['security.put_role_mapping'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2197,8 +3100,14 @@ export default class Security { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? 
{} + // @ts-expect-error + body[key] = params[key] + } } } @@ -2215,14 +3124,18 @@ export default class Security { /** * Create or update users. Add and update users in the native realm. A password is required for adding a new user but is optional when updating an existing user. To change a user's password without updating any other fields, use the change password API. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-put-user | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-security-put-user | Elasticsearch API documentation} */ async putUser (this: That, params: T.SecurityPutUserRequest, options?: TransportRequestOptionsWithOutMeta): Promise async putUser (this: That, params: T.SecurityPutUserRequest, options?: TransportRequestOptionsWithMeta): Promise> async putUser (this: That, params: T.SecurityPutUserRequest, options?: TransportRequestOptions): Promise async putUser (this: That, params: T.SecurityPutUserRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const acceptedBody: string[] = ['username', 'email', 'full_name', 'metadata', 'password', 'password_hash', 'roles', 'enabled'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['security.put_user'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2244,8 +3157,14 @@ export default class Security { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -2262,14 +3181,18 @@ export default class Security { /** * Find API keys with a query. Get a paginated list of API keys and their information. You can optionally filter the results with a query. To use this API, you must have at least the `manage_own_api_key` or the `read_security` cluster privileges. If you have only the `manage_own_api_key` privilege, this API returns only the API keys that you own. If you have the `read_security`, `manage_api_key`, or greater privileges (including `manage_security`), this API returns all API keys regardless of ownership. 
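Every method hunk in this file repeats the same routing change, so here it is once as a stand-alone sketch rather than the generated code itself: keys that are neither path parameters nor accepted query parameters now fall through to the request body instead of being appended to the query string. The names below are simplified stand-ins for the generated locals.

const COMMON_QUERY_PARAMS = ['error_trace', 'filter_path', 'human', 'pretty']

interface AcceptedEntry { path: string[], body: string[], query: string[] }

function routeParams (params: Record<string, unknown>, accepted: AcceptedEntry): {
  querystring: Record<string, unknown>
  body: Record<string, unknown>
} {
  const querystring: Record<string, unknown> = {}
  const body: Record<string, unknown> = {}
  for (const key of Object.keys(params)) {
    if (key === 'body' || key === 'querystring') continue // handled separately by the generated code
    if (accepted.body.includes(key)) {
      body[key] = params[key]                  // declared body field
    } else if (accepted.path.includes(key)) {
      continue                                 // path parameters are interpolated into the URL
    } else if (accepted.query.includes(key) || COMMON_QUERY_PARAMS.includes(key)) {
      querystring[key] = params[key]           // declared or common query parameter
    } else {
      body[key] = params[key]                  // anything else now defaults to the body
    }
  }
  return { querystring, body }
}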
- * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-query-api-keys | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-security-query-api-keys | Elasticsearch API documentation} */ async queryApiKeys (this: That, params?: T.SecurityQueryApiKeysRequest, options?: TransportRequestOptionsWithOutMeta): Promise async queryApiKeys (this: That, params?: T.SecurityQueryApiKeysRequest, options?: TransportRequestOptionsWithMeta): Promise> async queryApiKeys (this: That, params?: T.SecurityQueryApiKeysRequest, options?: TransportRequestOptions): Promise async queryApiKeys (this: That, params?: T.SecurityQueryApiKeysRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const acceptedBody: string[] = ['aggregations', 'aggs', 'query', 'from', 'sort', 'size', 'search_after'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['security.query_api_keys'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2292,8 +3215,14 @@ export default class Security { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -2307,14 +3236,18 @@ export default class Security { /** * Find roles with a query. Get roles in a paginated manner. The role management APIs are generally the preferred way to manage roles, rather than using file-based role management. The query roles API does not retrieve roles that are defined in roles files, nor built-in ones. You can optionally filter the results with a query. Also, the results can be paginated and sorted. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-query-role | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-security-query-role | Elasticsearch API documentation} */ async queryRole (this: That, params?: T.SecurityQueryRoleRequest, options?: TransportRequestOptionsWithOutMeta): Promise async queryRole (this: That, params?: T.SecurityQueryRoleRequest, options?: TransportRequestOptionsWithMeta): Promise> async queryRole (this: That, params?: T.SecurityQueryRoleRequest, options?: TransportRequestOptions): Promise async queryRole (this: That, params?: T.SecurityQueryRoleRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const acceptedBody: string[] = ['query', 'from', 'sort', 'size', 'search_after'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['security.query_role'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2337,8 +3270,14 @@ export default class Security { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? 
{} + // @ts-expect-error + body[key] = params[key] + } } } @@ -2352,14 +3291,18 @@ export default class Security { /** * Find users with a query. Get information for users in a paginated manner. You can optionally filter the results with a query. NOTE: As opposed to the get user API, built-in users are excluded from the result. This API is only for native users. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-query-user | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-security-query-user | Elasticsearch API documentation} */ async queryUser (this: That, params?: T.SecurityQueryUserRequest, options?: TransportRequestOptionsWithOutMeta): Promise async queryUser (this: That, params?: T.SecurityQueryUserRequest, options?: TransportRequestOptionsWithMeta): Promise> async queryUser (this: That, params?: T.SecurityQueryUserRequest, options?: TransportRequestOptions): Promise async queryUser (this: That, params?: T.SecurityQueryUserRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const acceptedBody: string[] = ['query', 'from', 'sort', 'size', 'search_after'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['security.query_user'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2382,8 +3325,14 @@ export default class Security { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -2397,14 +3346,18 @@ export default class Security { /** * Authenticate SAML. Submit a SAML response message to Elasticsearch for consumption. NOTE: This API is intended for use by custom web applications other than Kibana. If you are using Kibana, refer to the documentation for configuring SAML single-sign-on on the Elastic Stack. The SAML message that is submitted can be: * A response to a SAML authentication request that was previously created using the SAML prepare authentication API. * An unsolicited SAML message in the case of an IdP-initiated single sign-on (SSO) flow. In either case, the SAML message needs to be a base64 encoded XML document with a root element of ``. After successful validation, Elasticsearch responds with an Elasticsearch internal access token and refresh token that can be subsequently used for authentication. This API endpoint essentially exchanges SAML responses that indicate successful authentication in the IdP for Elasticsearch access and refresh tokens, which can be used for authentication against Elasticsearch. 
- * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-saml-authenticate | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-security-saml-authenticate | Elasticsearch API documentation} */ async samlAuthenticate (this: That, params: T.SecuritySamlAuthenticateRequest, options?: TransportRequestOptionsWithOutMeta): Promise async samlAuthenticate (this: That, params: T.SecuritySamlAuthenticateRequest, options?: TransportRequestOptionsWithMeta): Promise> async samlAuthenticate (this: That, params: T.SecuritySamlAuthenticateRequest, options?: TransportRequestOptions): Promise async samlAuthenticate (this: That, params: T.SecuritySamlAuthenticateRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const acceptedBody: string[] = ['content', 'ids', 'realm'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['security.saml_authenticate'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2426,8 +3379,14 @@ export default class Security { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -2441,14 +3400,18 @@ export default class Security { /** * Logout of SAML completely. Verifies the logout response sent from the SAML IdP. NOTE: This API is intended for use by custom web applications other than Kibana. If you are using Kibana, refer to the documentation for configuring SAML single-sign-on on the Elastic Stack. The SAML IdP may send a logout response back to the SP after handling the SP-initiated SAML Single Logout. This API verifies the response by ensuring the content is relevant and validating its signature. An empty response is returned if the verification process is successful. The response can be sent by the IdP with either the HTTP-Redirect or the HTTP-Post binding. The caller of this API must prepare the request accordingly so that this API can handle either of them. 
- * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-saml-complete-logout | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-security-saml-complete-logout | Elasticsearch API documentation} */ async samlCompleteLogout (this: That, params: T.SecuritySamlCompleteLogoutRequest, options?: TransportRequestOptionsWithOutMeta): Promise async samlCompleteLogout (this: That, params: T.SecuritySamlCompleteLogoutRequest, options?: TransportRequestOptionsWithMeta): Promise> async samlCompleteLogout (this: That, params: T.SecuritySamlCompleteLogoutRequest, options?: TransportRequestOptions): Promise async samlCompleteLogout (this: That, params: T.SecuritySamlCompleteLogoutRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const acceptedBody: string[] = ['realm', 'ids', 'query_string', 'content'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['security.saml_complete_logout'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2470,8 +3433,14 @@ export default class Security { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -2485,14 +3454,18 @@ export default class Security { /** * Invalidate SAML. Submit a SAML LogoutRequest message to Elasticsearch for consumption. NOTE: This API is intended for use by custom web applications other than Kibana. If you are using Kibana, refer to the documentation for configuring SAML single-sign-on on the Elastic Stack. The logout request comes from the SAML IdP during an IdP initiated Single Logout. The custom web application can use this API to have Elasticsearch process the `LogoutRequest`. After successful validation of the request, Elasticsearch invalidates the access token and refresh token that corresponds to that specific SAML principal and provides a URL that contains a SAML LogoutResponse message. Thus the user can be redirected back to their IdP. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-saml-invalidate | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-security-saml-invalidate | Elasticsearch API documentation} */ async samlInvalidate (this: That, params: T.SecuritySamlInvalidateRequest, options?: TransportRequestOptionsWithOutMeta): Promise async samlInvalidate (this: That, params: T.SecuritySamlInvalidateRequest, options?: TransportRequestOptionsWithMeta): Promise> async samlInvalidate (this: That, params: T.SecuritySamlInvalidateRequest, options?: TransportRequestOptions): Promise async samlInvalidate (this: That, params: T.SecuritySamlInvalidateRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const acceptedBody: string[] = ['acs', 'query_string', 'realm'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['security.saml_invalidate'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -2514,8 +3487,14 @@ export default class Security { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -2529,14 +3508,18 @@ export default class Security { /** * Logout of SAML. Submits a request to invalidate an access token and refresh token. NOTE: This API is intended for use by custom web applications other than Kibana. If you are using Kibana, refer to the documentation for configuring SAML single-sign-on on the Elastic Stack. This API invalidates the tokens that were generated for a user by the SAML authenticate API. If the SAML realm in Elasticsearch is configured accordingly and the SAML IdP supports this, the Elasticsearch response contains a URL to redirect the user to the IdP that contains a SAML logout request (starting an SP-initiated SAML Single Logout). - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-saml-logout | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-security-saml-logout | Elasticsearch API documentation} */ async samlLogout (this: That, params: T.SecuritySamlLogoutRequest, options?: TransportRequestOptionsWithOutMeta): Promise async samlLogout (this: That, params: T.SecuritySamlLogoutRequest, options?: TransportRequestOptionsWithMeta): Promise> async samlLogout (this: That, params: T.SecuritySamlLogoutRequest, options?: TransportRequestOptions): Promise async samlLogout (this: That, params: T.SecuritySamlLogoutRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const acceptedBody: string[] = ['token', 'refresh_token'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['security.saml_logout'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2558,8 +3541,14 @@ export default class Security { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -2573,14 +3562,18 @@ export default class Security { /** * Prepare SAML authentication. Create a SAML authentication request (``) as a URL string based on the configuration of the respective SAML realm in Elasticsearch. NOTE: This API is intended for use by custom web applications other than Kibana. If you are using Kibana, refer to the documentation for configuring SAML single-sign-on on the Elastic Stack. This API returns a URL pointing to the SAML Identity Provider. You can use the URL to redirect the browser of the user in order to continue the authentication process. The URL includes a single parameter named `SAMLRequest`, which contains a SAML Authentication request that is deflated and Base64 encoded. If the configuration dictates that SAML authentication requests should be signed, the URL has two extra parameters named `SigAlg` and `Signature`. 
These parameters contain the algorithm used for the signature and the signature value itself. It also returns a random string that uniquely identifies this SAML Authentication request. The caller of this API needs to store this identifier as it needs to be used in a following step of the authentication process. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-saml-prepare-authentication | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-security-saml-prepare-authentication | Elasticsearch API documentation} */ async samlPrepareAuthentication (this: That, params?: T.SecuritySamlPrepareAuthenticationRequest, options?: TransportRequestOptionsWithOutMeta): Promise async samlPrepareAuthentication (this: That, params?: T.SecuritySamlPrepareAuthenticationRequest, options?: TransportRequestOptionsWithMeta): Promise> async samlPrepareAuthentication (this: That, params?: T.SecuritySamlPrepareAuthenticationRequest, options?: TransportRequestOptions): Promise async samlPrepareAuthentication (this: That, params?: T.SecuritySamlPrepareAuthenticationRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const acceptedBody: string[] = ['acs', 'realm', 'relay_state'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['security.saml_prepare_authentication'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2603,8 +3596,14 @@ export default class Security { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -2618,13 +3617,16 @@ export default class Security { /** * Create SAML service provider metadata. Generate SAML metadata for a SAML 2.0 Service Provider. The SAML 2.0 specification provides a mechanism for Service Providers to describe their capabilities and configuration using a metadata file. This API generates Service Provider metadata based on the configuration of a SAML realm in Elasticsearch. 
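For the SAML flow covered by the hunks above, the routing matters because `acs`, `realm`, and `relay_state` are all body fields in the new `security.saml_prepare_authentication` entry. A hypothetical call, reusing the `client` from the first sketch; the realm name is invented.

// Returns a redirect URL for the IdP plus the request id needed to correlate the later
// samlAuthenticate call; `realm` travels in the request body per the accepted-params entry.
const { id, redirect } = await client.security.samlPrepareAuthentication({ realm: 'saml1' })
console.log('store this id for the authenticate step:', id)
console.log('send the browser to:', redirect)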
- * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-saml-service-provider-metadata | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-security-saml-service-provider-metadata | Elasticsearch API documentation} */ async samlServiceProviderMetadata (this: That, params: T.SecuritySamlServiceProviderMetadataRequest, options?: TransportRequestOptionsWithOutMeta): Promise async samlServiceProviderMetadata (this: That, params: T.SecuritySamlServiceProviderMetadataRequest, options?: TransportRequestOptionsWithMeta): Promise> async samlServiceProviderMetadata (this: That, params: T.SecuritySamlServiceProviderMetadataRequest, options?: TransportRequestOptions): Promise async samlServiceProviderMetadata (this: That, params: T.SecuritySamlServiceProviderMetadataRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['realm_name'] + const { + path: acceptedPath + } = this.acceptedParams['security.saml_service_provider_metadata'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2660,14 +3662,18 @@ export default class Security { /** * Suggest a user profile. Get suggestions for user profiles that match specified search criteria. NOTE: The user profile feature is designed only for use by Kibana and Elastic's Observability, Enterprise Search, and Elastic Security solutions. Individual users and external applications should not call this API directly. Elastic reserves the right to change or remove this feature in future releases without prior notice. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-suggest-user-profiles | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-security-suggest-user-profiles | Elasticsearch API documentation} */ async suggestUserProfiles (this: That, params?: T.SecuritySuggestUserProfilesRequest, options?: TransportRequestOptionsWithOutMeta): Promise async suggestUserProfiles (this: That, params?: T.SecuritySuggestUserProfilesRequest, options?: TransportRequestOptionsWithMeta): Promise> async suggestUserProfiles (this: That, params?: T.SecuritySuggestUserProfilesRequest, options?: TransportRequestOptions): Promise async suggestUserProfiles (this: That, params?: T.SecuritySuggestUserProfilesRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const acceptedBody: string[] = ['name', 'size', 'data', 'hint'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['security.suggest_user_profiles'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2690,8 +3696,14 @@ export default class Security { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -2705,14 +3717,18 @@ export default class Security { /** * Update an API key. Update attributes of an existing API key. This API supports updates to an API key's access scope, expiration, and metadata. 
To use this API, you must have at least the `manage_own_api_key` cluster privilege. Users can only update API keys that they created or that were granted to them. To update another user’s API key, use the `run_as` feature to submit a request on behalf of another user. IMPORTANT: It's not possible to use an API key as the authentication credential for this API. The owner user’s credentials are required. Use this API to update API keys created by the create API key or grant API Key APIs. If you need to apply the same update to many API keys, you can use the bulk update API keys API to reduce overhead. It's not possible to update expired API keys or API keys that have been invalidated by the invalidate API key API. The access scope of an API key is derived from the `role_descriptors` you specify in the request and a snapshot of the owner user's permissions at the time of the request. The snapshot of the owner's permissions is updated automatically on every call. IMPORTANT: If you don't specify `role_descriptors` in the request, a call to this API might still change the API key's access scope. This change can occur if the owner user's permissions have changed since the API key was created or last modified. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-update-api-key | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-security-update-api-key | Elasticsearch API documentation} */ async updateApiKey (this: That, params: T.SecurityUpdateApiKeyRequest, options?: TransportRequestOptionsWithOutMeta): Promise async updateApiKey (this: That, params: T.SecurityUpdateApiKeyRequest, options?: TransportRequestOptionsWithMeta): Promise> async updateApiKey (this: That, params: T.SecurityUpdateApiKeyRequest, options?: TransportRequestOptions): Promise async updateApiKey (this: That, params: T.SecurityUpdateApiKeyRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id'] - const acceptedBody: string[] = ['role_descriptors', 'metadata', 'expiration'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['security.update_api_key'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2734,8 +3750,14 @@ export default class Security { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -2752,14 +3774,18 @@ export default class Security { /** * Update a cross-cluster API key. Update the attributes of an existing cross-cluster API key, which is used for API key based remote cluster access. To use this API, you must have at least the `manage_security` cluster privilege. Users can only update API keys that they created. To update another user's API key, use the `run_as` feature to submit a request on behalf of another user. IMPORTANT: It's not possible to use an API key as the authentication credential for this API. To update an API key, the owner user's credentials are required. It's not possible to update expired API keys, or API keys that have been invalidated by the invalidate API key API. 
This API supports updates to an API key's access scope, metadata, and expiration. The owner user's information, such as the `username` and `realm`, is also updated automatically on every call. NOTE: This API cannot update REST API keys, which should be updated by either the update API key or bulk update API keys API. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-update-cross-cluster-api-key | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-security-update-cross-cluster-api-key | Elasticsearch API documentation} */ async updateCrossClusterApiKey (this: That, params: T.SecurityUpdateCrossClusterApiKeyRequest, options?: TransportRequestOptionsWithOutMeta): Promise async updateCrossClusterApiKey (this: That, params: T.SecurityUpdateCrossClusterApiKeyRequest, options?: TransportRequestOptionsWithMeta): Promise> async updateCrossClusterApiKey (this: That, params: T.SecurityUpdateCrossClusterApiKeyRequest, options?: TransportRequestOptions): Promise async updateCrossClusterApiKey (this: That, params: T.SecurityUpdateCrossClusterApiKeyRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id'] - const acceptedBody: string[] = ['access', 'expiration', 'metadata'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['security.update_cross_cluster_api_key'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2781,8 +3807,14 @@ export default class Security { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -2799,14 +3831,18 @@ export default class Security { /** * Update security index settings. Update the user-configurable settings for the security internal index (`.security` and associated indices). Only a subset of settings are allowed to be modified. This includes `index.auto_expand_replicas` and `index.number_of_replicas`. NOTE: If `index.auto_expand_replicas` is set, `index.number_of_replicas` will be ignored during updates. If a specific index is not in use on the system and settings are provided for it, the request will be rejected. This API does not yet support configuring the settings for indices before they are in use. 
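One detail every hunk preserves: a caller can still pass a literal `querystring` object, which is shallow-copied into the final query string before any other keys are routed, and the reserved `body`/`querystring` keys are never treated as ordinary parameters. A simplified sketch of just that behavior, not the generated code:

function seedQuerystring (params?: { querystring?: Record<string, unknown> }): Record<string, unknown> {
  const userQuery = params?.querystring
  // copy rather than alias, so later routed keys never mutate the caller's object
  return userQuery != null ? { ...userQuery } : {}
}

// seedQuerystring({ querystring: { human: true } })  -> { human: true }
// seedQuerystring(undefined)                         -> {}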
- * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-update-settings | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-security-update-settings | Elasticsearch API documentation} */ async updateSettings (this: That, params?: T.SecurityUpdateSettingsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async updateSettings (this: That, params?: T.SecurityUpdateSettingsRequest, options?: TransportRequestOptionsWithMeta): Promise> async updateSettings (this: That, params?: T.SecurityUpdateSettingsRequest, options?: TransportRequestOptions): Promise async updateSettings (this: That, params?: T.SecurityUpdateSettingsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const acceptedBody: string[] = ['security', 'security-profile', 'security-tokens'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['security.update_settings'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2829,8 +3865,14 @@ export default class Security { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -2844,14 +3886,18 @@ export default class Security { /** * Update user profile data. Update specific data for the user profile that is associated with a unique ID. NOTE: The user profile feature is designed only for use by Kibana and Elastic's Observability, Enterprise Search, and Elastic Security solutions. Individual users and external applications should not call this API directly. Elastic reserves the right to change or remove this feature in future releases without prior notice. To use this API, you must have one of the following privileges: * The `manage_user_profile` cluster privilege. * The `update_profile_data` global privilege for the namespaces that are referenced in the request. This API updates the `labels` and `data` fields of an existing user profile document with JSON objects. New keys and their values are added to the profile document and conflicting keys are replaced by data that's included in the request. For both labels and data, content is namespaced by the top-level fields. The `update_profile_data` global privilege grants privileges for updating only the allowed namespaces. 
- * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-update-user-profile-data | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-security-update-user-profile-data | Elasticsearch API documentation} */ async updateUserProfileData (this: That, params: T.SecurityUpdateUserProfileDataRequest, options?: TransportRequestOptionsWithOutMeta): Promise async updateUserProfileData (this: That, params: T.SecurityUpdateUserProfileDataRequest, options?: TransportRequestOptionsWithMeta): Promise> async updateUserProfileData (this: That, params: T.SecurityUpdateUserProfileDataRequest, options?: TransportRequestOptions): Promise async updateUserProfileData (this: That, params: T.SecurityUpdateUserProfileDataRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['uid'] - const acceptedBody: string[] = ['labels', 'data'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['security.update_user_profile_data'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -2873,8 +3919,14 @@ export default class Security { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } diff --git a/src/api/api/shutdown.ts b/src/api/api/shutdown.ts index ffa3b9c39..67755db18 100644 --- a/src/api/api/shutdown.ts +++ b/src/api/api/shutdown.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. 
and contributors + * SPDX-License-Identifier: Apache-2.0 */ /* eslint-disable import/export */ @@ -35,23 +21,69 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -interface That { transport: Transport } + +interface That { + transport: Transport + acceptedParams: Record +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] export default class Shutdown { transport: Transport + acceptedParams: Record constructor (transport: Transport) { this.transport = transport + this.acceptedParams = { + 'shutdown.delete_node': { + path: [ + 'node_id' + ], + body: [], + query: [ + 'master_timeout', + 'timeout' + ] + }, + 'shutdown.get_node': { + path: [ + 'node_id' + ], + body: [], + query: [ + 'master_timeout' + ] + }, + 'shutdown.put_node': { + path: [ + 'node_id' + ], + body: [ + 'type', + 'reason', + 'allocation_delay', + 'target_node_name' + ], + query: [ + 'master_timeout', + 'timeout' + ] + } + } } /** * Cancel node shutdown preparations. Remove a node from the shutdown list so it can resume normal operations. You must explicitly clear the shutdown request when a node rejoins the cluster or when a node has permanently left the cluster. Shutdown requests are never removed automatically by Elasticsearch. NOTE: This feature is designed for indirect use by Elastic Cloud, Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not supported. If the operator privileges feature is enabled, you must be an operator to use this API. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-shutdown-delete-node | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-shutdown-delete-node | Elasticsearch API documentation} */ async deleteNode (this: That, params: T.ShutdownDeleteNodeRequest, options?: TransportRequestOptionsWithOutMeta): Promise async deleteNode (this: That, params: T.ShutdownDeleteNodeRequest, options?: TransportRequestOptionsWithMeta): Promise> async deleteNode (this: That, params: T.ShutdownDeleteNodeRequest, options?: TransportRequestOptions): Promise async deleteNode (this: That, params: T.ShutdownDeleteNodeRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['node_id'] + const { + path: acceptedPath + } = this.acceptedParams['shutdown.delete_node'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -87,13 +119,16 @@ export default class Shutdown { /** * Get the shutdown status. Get information about nodes that are ready to be shut down, have shut down preparations still in progress, or have stalled. The API returns status information for each part of the shut down process. NOTE: This feature is designed for indirect use by Elasticsearch Service, Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not supported. If the operator privileges feature is enabled, you must be an operator to use this API. 
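The Shutdown class above shows the new pattern in its smallest form: the constructor builds a per-API registry and each method destructures its own entry instead of declaring inline arrays. A stand-alone sketch of that shape, mirroring the `shutdown.put_node` entry from the diff; the lookup itself is illustrative, not the generated code.

type AcceptedParams = Record<string, { path: string[], body: string[], query: string[] }>

const acceptedParams: AcceptedParams = {
  'shutdown.put_node': {
    path: ['node_id'],                                               // interpolated into the URL
    body: ['type', 'reason', 'allocation_delay', 'target_node_name'],
    query: ['master_timeout', 'timeout']
  }
}

// each generated method now starts with a destructuring lookup like this
const { path: acceptedPath, body: acceptedBody, query: acceptedQuery } =
  acceptedParams['shutdown.put_node']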
- * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-shutdown-get-node | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-shutdown-get-node | Elasticsearch API documentation} */ async getNode (this: That, params?: T.ShutdownGetNodeRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getNode (this: That, params?: T.ShutdownGetNodeRequest, options?: TransportRequestOptionsWithMeta): Promise> async getNode (this: That, params?: T.ShutdownGetNodeRequest, options?: TransportRequestOptions): Promise async getNode (this: That, params?: T.ShutdownGetNodeRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['node_id'] + const { + path: acceptedPath + } = this.acceptedParams['shutdown.get_node'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -137,14 +172,18 @@ export default class Shutdown { /** * Prepare a node to be shut down. NOTE: This feature is designed for indirect use by Elastic Cloud, Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not supported. If you specify a node that is offline, it will be prepared for shut down when it rejoins the cluster. If the operator privileges feature is enabled, you must be an operator to use this API. The API migrates ongoing tasks and index shards to other nodes as needed to prepare a node to be restarted or shut down and removed from the cluster. This ensures that Elasticsearch can be stopped safely with minimal disruption to the cluster. You must specify the type of shutdown: `restart`, `remove`, or `replace`. If a node is already being prepared for shutdown, you can use this API to change the shutdown type. IMPORTANT: This API does NOT terminate the Elasticsearch process. Monitor the node shutdown status to determine when it is safe to stop Elasticsearch. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-shutdown-put-node | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-shutdown-put-node | Elasticsearch API documentation} */ async putNode (this: That, params: T.ShutdownPutNodeRequest, options?: TransportRequestOptionsWithOutMeta): Promise async putNode (this: That, params: T.ShutdownPutNodeRequest, options?: TransportRequestOptionsWithMeta): Promise> async putNode (this: That, params: T.ShutdownPutNodeRequest, options?: TransportRequestOptions): Promise async putNode (this: That, params: T.ShutdownPutNodeRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['node_id'] - const acceptedBody: string[] = ['type', 'reason', 'allocation_delay', 'target_node_name'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['shutdown.put_node'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -166,8 +205,14 @@ export default class Shutdown { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? 
{} + // @ts-expect-error + body[key] = params[key] + } } } diff --git a/src/api/api/simulate.ts b/src/api/api/simulate.ts index ba1689505..68f123b5a 100644 --- a/src/api/api/simulate.ts +++ b/src/api/api/simulate.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 */ /* eslint-disable import/export */ @@ -35,24 +21,52 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -interface That { transport: Transport } + +interface That { + transport: Transport + acceptedParams: Record +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] export default class Simulate { transport: Transport + acceptedParams: Record constructor (transport: Transport) { this.transport = transport + this.acceptedParams = { + 'simulate.ingest': { + path: [ + 'index' + ], + body: [ + 'docs', + 'component_template_substitutions', + 'index_template_substitutions', + 'mapping_addition', + 'pipeline_substitutions' + ], + query: [ + 'pipeline' + ] + } + } } /** * Simulate data ingestion. Run ingest pipelines against a set of provided documents, optionally with substitute pipeline definitions, to simulate ingesting data into an index. This API is meant to be used for troubleshooting or pipeline development, as it does not actually index any data into Elasticsearch. The API runs the default and final pipeline for that index against a set of documents provided in the body of the request. If a pipeline contains a reroute processor, it follows that reroute processor to the new index, running that index's pipelines as well the same way that a non-simulated ingest would. No data is indexed into Elasticsearch. Instead, the transformed document is returned, along with the list of pipelines that have been run and the name of the index where the document would have been indexed if this were not a simulation. The transformed document is validated against the mappings that would apply to this index, and any validation error is reported in the result. This API differs from the simulate pipeline API in that you specify a single pipeline for that API, and it runs only that one pipeline. The simulate pipeline API is more useful for developing a single pipeline, while the simulate ingest API is more useful for troubleshooting the interaction of the various pipelines that get applied when ingesting into an index. By default, the pipeline definitions that are currently in the system are used. However, you can supply substitute pipeline definitions in the body of the request. These will be used in place of the pipeline definitions that are already in the system. 
This can be used to replace existing pipeline definitions or to create new ones. The pipeline substitutions are used only within this request. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-simulate-ingest | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-simulate-ingest | Elasticsearch API documentation} */ async ingest (this: That, params: T.SimulateIngestRequest, options?: TransportRequestOptionsWithOutMeta): Promise async ingest (this: That, params: T.SimulateIngestRequest, options?: TransportRequestOptionsWithMeta): Promise> async ingest (this: That, params: T.SimulateIngestRequest, options?: TransportRequestOptions): Promise async ingest (this: That, params: T.SimulateIngestRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] - const acceptedBody: string[] = ['docs', 'component_template_substitutions', 'index_template_subtitutions', 'mapping_addition', 'pipeline_substitutions'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['simulate.ingest'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -74,8 +88,14 @@ export default class Simulate { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } diff --git a/src/api/api/slm.ts b/src/api/api/slm.ts index 9e6a856f9..cb290106f 100644 --- a/src/api/api/slm.ts +++ b/src/api/api/slm.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. 
and contributors + * SPDX-License-Identifier: Apache-2.0 */ /* eslint-disable import/export */ @@ -35,23 +21,121 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -interface That { transport: Transport } + +interface That { + transport: Transport + acceptedParams: Record +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] export default class Slm { transport: Transport + acceptedParams: Record constructor (transport: Transport) { this.transport = transport + this.acceptedParams = { + 'slm.delete_lifecycle': { + path: [ + 'policy_id' + ], + body: [], + query: [ + 'master_timeout', + 'timeout' + ] + }, + 'slm.execute_lifecycle': { + path: [ + 'policy_id' + ], + body: [], + query: [ + 'master_timeout', + 'timeout' + ] + }, + 'slm.execute_retention': { + path: [], + body: [], + query: [ + 'master_timeout', + 'timeout' + ] + }, + 'slm.get_lifecycle': { + path: [ + 'policy_id' + ], + body: [], + query: [ + 'master_timeout', + 'timeout' + ] + }, + 'slm.get_stats': { + path: [], + body: [], + query: [ + 'master_timeout', + 'timeout' + ] + }, + 'slm.get_status': { + path: [], + body: [], + query: [ + 'master_timeout', + 'timeout' + ] + }, + 'slm.put_lifecycle': { + path: [ + 'policy_id' + ], + body: [ + 'config', + 'name', + 'repository', + 'retention', + 'schedule' + ], + query: [ + 'master_timeout', + 'timeout' + ] + }, + 'slm.start': { + path: [], + body: [], + query: [ + 'master_timeout', + 'timeout' + ] + }, + 'slm.stop': { + path: [], + body: [], + query: [ + 'master_timeout', + 'timeout' + ] + } + } } /** * Delete a policy. Delete a snapshot lifecycle policy definition. This operation prevents any future snapshots from being taken but does not cancel in-progress snapshots or remove previously-taken snapshots. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-slm-delete-lifecycle | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-slm-delete-lifecycle | Elasticsearch API documentation} */ async deleteLifecycle (this: That, params: T.SlmDeleteLifecycleRequest, options?: TransportRequestOptionsWithOutMeta): Promise async deleteLifecycle (this: That, params: T.SlmDeleteLifecycleRequest, options?: TransportRequestOptionsWithMeta): Promise> async deleteLifecycle (this: That, params: T.SlmDeleteLifecycleRequest, options?: TransportRequestOptions): Promise async deleteLifecycle (this: That, params: T.SlmDeleteLifecycleRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['policy_id'] + const { + path: acceptedPath + } = this.acceptedParams['slm.delete_lifecycle'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -87,13 +171,16 @@ export default class Slm { /** * Run a policy. Immediately create a snapshot according to the snapshot lifecycle policy without waiting for the scheduled time. The snapshot policy is normally applied according to its schedule, but you might want to manually run a policy before performing an upgrade or other maintenance. 
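Putting the `slm.put_lifecycle` entry from the registry above into practice, a hypothetical call would split as follows: `policy_id` is a path parameter, `master_timeout` is an accepted query parameter, and the remaining fields are routed into the request body. The values are invented; `client` is the instance from the first sketch.

await client.slm.putLifecycle({
  policy_id: 'nightly-snapshots',                                    // path  -> /_slm/policy/nightly-snapshots
  schedule: '0 30 1 * * ?',                                          // body
  name: '<nightly-snap-{now/d}>',                                    // body
  repository: 'my_repository',                                       // body
  retention: { expire_after: '30d', min_count: 5, max_count: 50 },   // body
  master_timeout: '30s'                                              // query -> ?master_timeout=30s
})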
- * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-slm-execute-lifecycle | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-slm-execute-lifecycle | Elasticsearch API documentation} */ async executeLifecycle (this: That, params: T.SlmExecuteLifecycleRequest, options?: TransportRequestOptionsWithOutMeta): Promise async executeLifecycle (this: That, params: T.SlmExecuteLifecycleRequest, options?: TransportRequestOptionsWithMeta): Promise> async executeLifecycle (this: That, params: T.SlmExecuteLifecycleRequest, options?: TransportRequestOptions): Promise async executeLifecycle (this: That, params: T.SlmExecuteLifecycleRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['policy_id'] + const { + path: acceptedPath + } = this.acceptedParams['slm.execute_lifecycle'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -129,13 +216,16 @@ export default class Slm { /** * Run a retention policy. Manually apply the retention policy to force immediate removal of snapshots that are expired according to the snapshot lifecycle policy retention rules. The retention policy is normally applied according to its schedule. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-slm-execute-retention | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-slm-execute-retention | Elasticsearch API documentation} */ async executeRetention (this: That, params?: T.SlmExecuteRetentionRequest, options?: TransportRequestOptionsWithOutMeta): Promise async executeRetention (this: That, params?: T.SlmExecuteRetentionRequest, options?: TransportRequestOptionsWithMeta): Promise> async executeRetention (this: That, params?: T.SlmExecuteRetentionRequest, options?: TransportRequestOptions): Promise async executeRetention (this: That, params?: T.SlmExecuteRetentionRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] + const { + path: acceptedPath + } = this.acceptedParams['slm.execute_retention'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -169,13 +259,16 @@ export default class Slm { /** * Get policy information. Get snapshot lifecycle policy definitions and information about the latest snapshot attempts. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-slm-get-lifecycle | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-slm-get-lifecycle | Elasticsearch API documentation} */ async getLifecycle (this: That, params?: T.SlmGetLifecycleRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getLifecycle (this: That, params?: T.SlmGetLifecycleRequest, options?: TransportRequestOptionsWithMeta): Promise> async getLifecycle (this: That, params?: T.SlmGetLifecycleRequest, options?: TransportRequestOptions): Promise async getLifecycle (this: That, params?: T.SlmGetLifecycleRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['policy_id'] + const { + path: acceptedPath + } = this.acceptedParams['slm.get_lifecycle'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -219,13 +312,16 @@ export default class Slm { /** * Get snapshot lifecycle management statistics. Get global and policy-level statistics about actions taken by snapshot lifecycle management. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-slm-get-stats | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-slm-get-stats | Elasticsearch API documentation} */ async getStats (this: That, params?: T.SlmGetStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getStats (this: That, params?: T.SlmGetStatsRequest, options?: TransportRequestOptionsWithMeta): Promise> async getStats (this: That, params?: T.SlmGetStatsRequest, options?: TransportRequestOptions): Promise async getStats (this: That, params?: T.SlmGetStatsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] + const { + path: acceptedPath + } = this.acceptedParams['slm.get_stats'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -259,13 +355,16 @@ export default class Slm { /** * Get the snapshot lifecycle management status. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-slm-get-status | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-slm-get-status | Elasticsearch API documentation} */ async getStatus (this: That, params?: T.SlmGetStatusRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getStatus (this: That, params?: T.SlmGetStatusRequest, options?: TransportRequestOptionsWithMeta): Promise> async getStatus (this: That, params?: T.SlmGetStatusRequest, options?: TransportRequestOptions): Promise async getStatus (this: That, params?: T.SlmGetStatusRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] + const { + path: acceptedPath + } = this.acceptedParams['slm.get_status'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -299,14 +398,18 @@ export default class Slm { /** * Create or update a policy. Create or update a snapshot lifecycle policy. If the policy already exists, this request increments the policy version. Only the latest version of a policy is stored. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-slm-put-lifecycle | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-slm-put-lifecycle | Elasticsearch API documentation} */ async putLifecycle (this: That, params: T.SlmPutLifecycleRequest, options?: TransportRequestOptionsWithOutMeta): Promise async putLifecycle (this: That, params: T.SlmPutLifecycleRequest, options?: TransportRequestOptionsWithMeta): Promise> async putLifecycle (this: That, params: T.SlmPutLifecycleRequest, options?: TransportRequestOptions): Promise async putLifecycle (this: That, params: T.SlmPutLifecycleRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['policy_id'] - const acceptedBody: string[] = ['config', 'name', 'repository', 'retention', 'schedule'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['slm.put_lifecycle'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -328,8 +431,14 @@ export default class Slm { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -346,13 +455,16 @@ export default class Slm { /** * Start snapshot lifecycle management. Snapshot lifecycle management (SLM) starts automatically when a cluster is formed. Manually starting SLM is necessary only if it has been stopped using the stop SLM API. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-slm-start | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-slm-start | Elasticsearch API documentation} */ async start (this: That, params?: T.SlmStartRequest, options?: TransportRequestOptionsWithOutMeta): Promise async start (this: That, params?: T.SlmStartRequest, options?: TransportRequestOptionsWithMeta): Promise> async start (this: That, params?: T.SlmStartRequest, options?: TransportRequestOptions): Promise async start (this: That, params?: T.SlmStartRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] + const { + path: acceptedPath + } = this.acceptedParams['slm.start'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -386,13 +498,16 @@ export default class Slm { /** * Stop snapshot lifecycle management. Stop all snapshot lifecycle management (SLM) operations and the SLM plugin. This API is useful when you are performing maintenance on a cluster and need to prevent SLM from performing any actions on your data streams or indices. Stopping SLM does not stop any snapshots that are in progress. You can manually trigger snapshots with the run snapshot lifecycle policy API even if SLM is stopped. The API returns a response as soon as the request is acknowledged, but the plugin might continue to run until in-progress operations complete and it can be safely stopped. Use the get snapshot lifecycle management status API to see if SLM is running. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-slm-stop | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-slm-stop | Elasticsearch API documentation} */ async stop (this: That, params?: T.SlmStopRequest, options?: TransportRequestOptionsWithOutMeta): Promise async stop (this: That, params?: T.SlmStopRequest, options?: TransportRequestOptionsWithMeta): Promise> async stop (this: That, params?: T.SlmStopRequest, options?: TransportRequestOptions): Promise async stop (this: That, params?: T.SlmStopRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] + const { + path: acceptedPath + } = this.acceptedParams['slm.stop'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} diff --git a/src/api/api/snapshot.ts b/src/api/api/snapshot.ts index 3b37c9bdb..8367baad7 100644 --- a/src/api/api/snapshot.ts +++ b/src/api/api/snapshot.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. 
See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 */ /* eslint-disable import/export */ @@ -35,23 +21,221 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -interface That { transport: Transport } + +interface That { + transport: Transport + acceptedParams: Record +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] export default class Snapshot { transport: Transport + acceptedParams: Record constructor (transport: Transport) { this.transport = transport + this.acceptedParams = { + 'snapshot.cleanup_repository': { + path: [ + 'name' + ], + body: [], + query: [ + 'master_timeout', + 'timeout' + ] + }, + 'snapshot.clone': { + path: [ + 'repository', + 'snapshot', + 'target_snapshot' + ], + body: [ + 'indices' + ], + query: [ + 'master_timeout' + ] + }, + 'snapshot.create': { + path: [ + 'repository', + 'snapshot' + ], + body: [ + 'expand_wildcards', + 'feature_states', + 'ignore_unavailable', + 'include_global_state', + 'indices', + 'metadata', + 'partial' + ], + query: [ + 'master_timeout', + 'wait_for_completion' + ] + }, + 'snapshot.create_repository': { + path: [ + 'name' + ], + body: [ + 'repository' + ], + query: [ + 'master_timeout', + 'timeout', + 'verify' + ] + }, + 'snapshot.delete': { + path: [ + 'repository', + 'snapshot' + ], + body: [], + query: [ + 'master_timeout' + ] + }, + 'snapshot.delete_repository': { + path: [ + 'name' + ], + body: [], + query: [ + 'master_timeout', + 'timeout' + ] + }, + 'snapshot.get': { + path: [ + 'repository', + 'snapshot' + ], + body: [], + query: [ + 'after', + 'from_sort_value', + 'ignore_unavailable', + 'index_details', + 'index_names', + 'include_repository', + 'master_timeout', + 'order', + 'offset', + 'size', + 'slm_policy_filter', + 'sort', + 'verbose' + ] + }, + 'snapshot.get_repository': { + path: [ + 'name' + ], + body: [], + query: [ + 'local', + 'master_timeout' + ] + }, + 'snapshot.repository_analyze': { + path: [ + 'name' + ], + body: [], + query: [ + 'blob_count', + 'concurrency', + 'detailed', + 'early_read_node_count', + 'max_blob_size', + 'max_total_data_size', + 'rare_action_probability', + 'rarely_abort_writes', + 'read_node_count', + 'register_operation_count', + 'seed', + 'timeout' + ] + }, + 'snapshot.repository_verify_integrity': { + path: [ + 'name' + ], + body: [], + query: [ + 'blob_thread_pool_concurrency', + 'index_snapshot_verification_concurrency', + 'index_verification_concurrency', + 'max_bytes_per_sec', + 'max_failed_shard_snapshots', + 'meta_thread_pool_concurrency', + 'snapshot_verification_concurrency', + 'verify_blob_contents' + ] + }, + 'snapshot.restore': { + path: [ + 'repository', + 'snapshot' + ], + body: [ + 'feature_states', + 'ignore_index_settings', + 'ignore_unavailable', + 'include_aliases', + 
'include_global_state', + 'index_settings', + 'indices', + 'partial', + 'rename_pattern', + 'rename_replacement' + ], + query: [ + 'master_timeout', + 'wait_for_completion' + ] + }, + 'snapshot.status': { + path: [ + 'repository', + 'snapshot' + ], + body: [], + query: [ + 'ignore_unavailable', + 'master_timeout' + ] + }, + 'snapshot.verify_repository': { + path: [ + 'name' + ], + body: [], + query: [ + 'master_timeout', + 'timeout' + ] + } + } } /** * Clean up the snapshot repository. Trigger the review of the contents of a snapshot repository and delete any stale data not referenced by existing snapshots. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-cleanup-repository | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-snapshot-cleanup-repository | Elasticsearch API documentation} */ async cleanupRepository (this: That, params: T.SnapshotCleanupRepositoryRequest, options?: TransportRequestOptionsWithOutMeta): Promise async cleanupRepository (this: That, params: T.SnapshotCleanupRepositoryRequest, options?: TransportRequestOptionsWithMeta): Promise> async cleanupRepository (this: That, params: T.SnapshotCleanupRepositoryRequest, options?: TransportRequestOptions): Promise async cleanupRepository (this: That, params: T.SnapshotCleanupRepositoryRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] + const { + path: acceptedPath + } = this.acceptedParams['snapshot.cleanup_repository'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -87,14 +271,18 @@ export default class Snapshot { /** * Clone a snapshot. Clone part of all of a snapshot into another snapshot in the same repository. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-clone | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-snapshot-clone | Elasticsearch API documentation} */ async clone (this: That, params: T.SnapshotCloneRequest, options?: TransportRequestOptionsWithOutMeta): Promise async clone (this: That, params: T.SnapshotCloneRequest, options?: TransportRequestOptionsWithMeta): Promise> async clone (this: That, params: T.SnapshotCloneRequest, options?: TransportRequestOptions): Promise async clone (this: That, params: T.SnapshotCloneRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['repository', 'snapshot', 'target_snapshot'] - const acceptedBody: string[] = ['indices'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['snapshot.clone'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -116,8 +304,14 @@ export default class Snapshot { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -136,14 +330,18 @@ export default class Snapshot { /** * Create a snapshot. Take a snapshot of a cluster or of data streams and indices. 
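// A minimal sketch of taking a snapshot with the create method above, assuming an existing
// repository; the repository name, snapshot name, and index pattern are hypothetical. Fields
// listed in acceptedParams['snapshot.create'].body (indices, include_global_state, metadata, ...)
// go into the request body, while wait_for_completion remains a query-string parameter.
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'https://localhost:9200/' })

await client.snapshot.create({
  repository: 'my_repository',
  snapshot: 'snapshot-2025-01-01',
  indices: 'my-index-*',
  include_global_state: false,
  metadata: { taken_by: 'nightly-job' },
  wait_for_completion: true
})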
- * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-create | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-snapshot-create | Elasticsearch API documentation} */ async create (this: That, params: T.SnapshotCreateRequest, options?: TransportRequestOptionsWithOutMeta): Promise async create (this: That, params: T.SnapshotCreateRequest, options?: TransportRequestOptionsWithMeta): Promise> async create (this: That, params: T.SnapshotCreateRequest, options?: TransportRequestOptions): Promise async create (this: That, params: T.SnapshotCreateRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['repository', 'snapshot'] - const acceptedBody: string[] = ['expand_wildcards', 'feature_states', 'ignore_unavailable', 'include_global_state', 'indices', 'metadata', 'partial'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['snapshot.create'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -165,8 +363,14 @@ } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -184,14 +388,18 @@ export default class Snapshot { /** * Create or update a snapshot repository. IMPORTANT: If you are migrating searchable snapshots, the repository name must be identical in the source and destination clusters. To register a snapshot repository, the cluster's global metadata must be writeable. Ensure there are no cluster blocks (for example, `cluster.blocks.read_only` and `cluster.blocks.read_only_allow_delete` settings) that prevent write access. Several options for this API can be specified using a query parameter or a request body parameter. If both parameters are specified, only the query parameter is used. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-create-repository | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-snapshot-create-repository | Elasticsearch API documentation} */ async createRepository (this: That, params: T.SnapshotCreateRepositoryRequest, options?: TransportRequestOptionsWithOutMeta): Promise async createRepository (this: That, params: T.SnapshotCreateRepositoryRequest, options?: TransportRequestOptionsWithMeta): Promise> async createRepository (this: That, params: T.SnapshotCreateRepositoryRequest, options?: TransportRequestOptions): Promise async createRepository (this: That, params: T.SnapshotCreateRepositoryRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] - const acceptedBody: string[] = ['repository'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['snapshot.create_repository'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ?
{ ...userQuery } : {} @@ -203,8 +411,14 @@ export default class Snapshot { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -221,13 +435,16 @@ export default class Snapshot { /** * Delete snapshots. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-delete | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-snapshot-delete | Elasticsearch API documentation} */ async delete (this: That, params: T.SnapshotDeleteRequest, options?: TransportRequestOptionsWithOutMeta): Promise async delete (this: That, params: T.SnapshotDeleteRequest, options?: TransportRequestOptionsWithMeta): Promise> async delete (this: That, params: T.SnapshotDeleteRequest, options?: TransportRequestOptions): Promise async delete (this: That, params: T.SnapshotDeleteRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['repository', 'snapshot'] + const { + path: acceptedPath + } = this.acceptedParams['snapshot.delete'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -264,13 +481,16 @@ export default class Snapshot { /** * Delete snapshot repositories. When a repository is unregistered, Elasticsearch removes only the reference to the location where the repository is storing the snapshots. The snapshots themselves are left untouched and in place. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-delete-repository | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-snapshot-delete-repository | Elasticsearch API documentation} */ async deleteRepository (this: That, params: T.SnapshotDeleteRepositoryRequest, options?: TransportRequestOptionsWithOutMeta): Promise async deleteRepository (this: That, params: T.SnapshotDeleteRepositoryRequest, options?: TransportRequestOptionsWithMeta): Promise> async deleteRepository (this: That, params: T.SnapshotDeleteRepositoryRequest, options?: TransportRequestOptions): Promise async deleteRepository (this: That, params: T.SnapshotDeleteRepositoryRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] + const { + path: acceptedPath + } = this.acceptedParams['snapshot.delete_repository'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -306,13 +526,16 @@ export default class Snapshot { /** * Get snapshot information. NOTE: The `after` parameter and `next` field enable you to iterate through snapshots with some consistency guarantees regarding concurrent creation or deletion of snapshots. It is guaranteed that any snapshot that exists at the beginning of the iteration and is not concurrently deleted will be seen during the iteration. Snapshots concurrently created may be seen during an iteration. 
- * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-get | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-snapshot-get | Elasticsearch API documentation} */ async get (this: That, params: T.SnapshotGetRequest, options?: TransportRequestOptionsWithOutMeta): Promise async get (this: That, params: T.SnapshotGetRequest, options?: TransportRequestOptionsWithMeta): Promise> async get (this: That, params: T.SnapshotGetRequest, options?: TransportRequestOptions): Promise async get (this: That, params: T.SnapshotGetRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['repository', 'snapshot'] + const { + path: acceptedPath + } = this.acceptedParams['snapshot.get'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -349,13 +572,16 @@ export default class Snapshot { /** * Get snapshot repository information. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-get-repository | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-snapshot-get-repository | Elasticsearch API documentation} */ async getRepository (this: That, params?: T.SnapshotGetRepositoryRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getRepository (this: That, params?: T.SnapshotGetRepositoryRequest, options?: TransportRequestOptionsWithMeta): Promise> async getRepository (this: That, params?: T.SnapshotGetRepositoryRequest, options?: TransportRequestOptions): Promise async getRepository (this: That, params?: T.SnapshotGetRepositoryRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] + const { + path: acceptedPath + } = this.acceptedParams['snapshot.get_repository'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -399,13 +625,16 @@ export default class Snapshot { /** * Analyze a snapshot repository. Analyze the performance characteristics and any incorrect behaviour found in a repository. The response exposes implementation details of the analysis which may change from version to version. The response body format is therefore not considered stable and may be different in newer versions. There are a large number of third-party storage systems available, not all of which are suitable for use as a snapshot repository by Elasticsearch. Some storage systems behave incorrectly, or perform poorly, especially when accessed concurrently by multiple clients as the nodes of an Elasticsearch cluster do. This API performs a collection of read and write operations on your repository which are designed to detect incorrect behaviour and to measure the performance characteristics of your storage system. The default values for the parameters are deliberately low to reduce the impact of running an analysis inadvertently and to provide a sensible starting point for your investigations. Run your first analysis with the default parameter values to check for simple problems. If successful, run a sequence of increasingly large analyses until you encounter a failure or you reach a `blob_count` of at least `2000`, a `max_blob_size` of at least `2gb`, a `max_total_data_size` of at least `1tb`, and a `register_operation_count` of at least `100`. 
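// A sketch of the escalation strategy described above: a default-sized analysis first, then a
// larger run with a generous timeout. The repository name is hypothetical; every option shown
// is a query-string parameter per acceptedParams['snapshot.repository_analyze'].
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'https://localhost:9200/' })

// First pass with default sizes to catch simple problems.
await client.snapshot.repositoryAnalyze({ name: 'my_repository' })

// A later, larger pass approaching the recommended sizes.
await client.snapshot.repositoryAnalyze({
  name: 'my_repository',
  blob_count: 2000,
  max_blob_size: '2gb',
  max_total_data_size: '1tb',
  register_operation_count: 100,
  detailed: true,
  timeout: '2h'
})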
Always specify a generous timeout, possibly `1h` or longer, to allow time for each analysis to run to completion. Perform the analyses using a multi-node cluster of a similar size to your production cluster so that it can detect any problems that only arise when the repository is accessed by many nodes at once. If the analysis fails, Elasticsearch detected that your repository behaved unexpectedly. This usually means you are using a third-party storage system with an incorrect or incompatible implementation of the API it claims to support. If so, this storage system is not suitable for use as a snapshot repository. You will need to work with the supplier of your storage system to address the incompatibilities that Elasticsearch detects. If the analysis is successful, the API returns details of the testing process, optionally including how long each operation took. You can use this information to determine the performance of your storage system. If any operation fails or returns an incorrect result, the API returns an error. If the API returns an error, it may not have removed all the data it wrote to the repository. The error will indicate the location of any leftover data and this path is also recorded in the Elasticsearch logs. You should verify that this location has been cleaned up correctly. If there is still leftover data at the specified location, you should manually remove it. If the connection from your client to Elasticsearch is closed while the client is waiting for the result of the analysis, the test is cancelled. Some clients are configured to close their connection if no response is received within a certain timeout. An analysis takes a long time to complete so you might need to relax any such client-side timeouts. On cancellation the analysis attempts to clean up the data it was writing, but it may not be able to remove it all. The path to the leftover data is recorded in the Elasticsearch logs. You should verify that this location has been cleaned up correctly. If there is still leftover data at the specified location, you should manually remove it. If the analysis is successful then it detected no incorrect behaviour, but this does not mean that correct behaviour is guaranteed. The analysis attempts to detect common bugs but it does not offer 100% coverage. Additionally, it does not test the following: * Your repository must perform durable writes. Once a blob has been written it must remain in place until it is deleted, even after a power loss or similar disaster. * Your repository must not suffer from silent data corruption. Once a blob has been written, its contents must remain unchanged until it is deliberately modified or deleted. * Your repository must behave correctly even if connectivity from the cluster is disrupted. Reads and writes may fail in this case, but they must not return incorrect results. IMPORTANT: An analysis writes a substantial amount of data to your repository and then reads it back again. This consumes bandwidth on the network between the cluster and the repository, and storage space and I/O bandwidth on the repository itself. You must ensure this load does not affect other users of these systems. Analyses respect the repository settings `max_snapshot_bytes_per_sec` and `max_restore_bytes_per_sec` if available and the cluster setting `indices.recovery.max_bytes_per_sec` which you can use to limit the bandwidth they consume. NOTE: This API is intended for exploratory use by humans. 
You should expect the request parameters and the response format to vary in future versions. NOTE: Different versions of Elasticsearch may perform different checks for repository compatibility, with newer versions typically being stricter than older ones. A storage system that passes repository analysis with one version of Elasticsearch may fail with a different version. This indicates it behaves incorrectly in ways that the former version did not detect. You must work with the supplier of your storage system to address the incompatibilities detected by the repository analysis API in any version of Elasticsearch. NOTE: This API may not work correctly in a mixed-version cluster. *Implementation details* NOTE: This section of documentation describes how the repository analysis API works in this version of Elasticsearch, but you should expect the implementation to vary between versions. The request parameters and response format depend on details of the implementation so may also be different in newer versions. The analysis comprises a number of blob-level tasks, as set by the `blob_count` parameter and a number of compare-and-exchange operations on linearizable registers, as set by the `register_operation_count` parameter. These tasks are distributed over the data and master-eligible nodes in the cluster for execution. For most blob-level tasks, the executing node first writes a blob to the repository and then instructs some of the other nodes in the cluster to attempt to read the data it just wrote. The size of the blob is chosen randomly, according to the `max_blob_size` and `max_total_data_size` parameters. If any of these reads fails then the repository does not implement the necessary read-after-write semantics that Elasticsearch requires. For some blob-level tasks, the executing node will instruct some of its peers to attempt to read the data before the writing process completes. These reads are permitted to fail, but must not return partial data. If any read returns partial data then the repository does not implement the necessary atomicity semantics that Elasticsearch requires. For some blob-level tasks, the executing node will overwrite the blob while its peers are reading it. In this case the data read may come from either the original or the overwritten blob, but the read operation must not return partial data or a mix of data from the two blobs. If any of these reads returns partial data or a mix of the two blobs then the repository does not implement the necessary atomicity semantics that Elasticsearch requires for overwrites. The executing node will use a variety of different methods to write the blob. For instance, where applicable, it will use both single-part and multi-part uploads. Similarly, the reading nodes will use a variety of different methods to read the data back again. For instance they may read the entire blob from start to end or may read only a subset of the data. For some blob-level tasks, the executing node will cancel the write before it is complete. In this case, it still instructs some of the other nodes in the cluster to attempt to read the blob but all of these reads must fail to find the blob. Linearizable registers are special blobs that Elasticsearch manipulates using an atomic compare-and-exchange operation. This operation ensures correct and strongly-consistent behavior even when the blob is accessed by multiple nodes at the same time. 
The detailed implementation of the compare-and-exchange operation on linearizable registers varies by repository type. Repository analysis verifies that uncontended compare-and-exchange operations on a linearizable register blob always succeed. Repository analysis also verifies that contended operations either succeed or report the contention but do not return incorrect results. If an operation fails due to contention, Elasticsearch retries the operation until it succeeds. Most of the compare-and-exchange operations performed by repository analysis atomically increment a counter which is represented as an 8-byte blob. Some operations also verify the behavior on small blobs with sizes other than 8 bytes. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-repository-analyze | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-snapshot-repository-analyze | Elasticsearch API documentation} */ async repositoryAnalyze (this: That, params: T.SnapshotRepositoryAnalyzeRequest, options?: TransportRequestOptionsWithOutMeta): Promise async repositoryAnalyze (this: That, params: T.SnapshotRepositoryAnalyzeRequest, options?: TransportRequestOptionsWithMeta): Promise> async repositoryAnalyze (this: That, params: T.SnapshotRepositoryAnalyzeRequest, options?: TransportRequestOptions): Promise async repositoryAnalyze (this: That, params: T.SnapshotRepositoryAnalyzeRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] + const { + path: acceptedPath + } = this.acceptedParams['snapshot.repository_analyze'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -441,13 +670,16 @@ export default class Snapshot { /** * Verify the repository integrity. Verify the integrity of the contents of a snapshot repository. This API enables you to perform a comprehensive check of the contents of a repository, looking for any anomalies in its data or metadata which might prevent you from restoring snapshots from the repository or which might cause future snapshot create or delete operations to fail. If you suspect the integrity of the contents of one of your snapshot repositories, cease all write activity to this repository immediately, set its `read_only` option to `true`, and use this API to verify its integrity. Until you do so: * It may not be possible to restore some snapshots from this repository. * Searchable snapshots may report errors when searched or may have unassigned shards. * Taking snapshots into this repository may fail or may appear to succeed but have created a snapshot which cannot be restored. * Deleting snapshots from this repository may fail or may appear to succeed but leave the underlying data on disk. * Continuing to write to the repository while it is in an invalid state may cause additional damage to its contents. If the API finds any problems with the integrity of the contents of your repository, Elasticsearch will not be able to repair the damage. The only way to bring the repository back into a fully working state after its contents have been damaged is by restoring its contents from a repository backup which was taken before the damage occurred. You must also identify what caused the damage and take action to prevent it from happening again. If you cannot restore a repository backup, register a new repository and use this for all future snapshot operations.
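// A sketch of checking a suspect repository as described above: writes should already have been
// stopped and the repository marked read-only before this call. The repository name is
// hypothetical; verify_blob_contents is one of the query parameters listed in
// acceptedParams['snapshot.repository_verify_integrity'].
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'https://localhost:9200/' })

const report = await client.snapshot.repositoryVerifyIntegrity({
  name: 'my_repository',
  verify_blob_contents: false
})
console.log(report)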
In some cases it may be possible to recover some of the contents of a damaged repository, either by restoring as many of its snapshots as needed and taking new snapshots of the restored data, or by using the reindex API to copy data from any searchable snapshots mounted from the damaged repository. Avoid all operations which write to the repository while the verify repository integrity API is running. If something changes the repository contents while an integrity verification is running then Elasticsearch may incorrectly report having detected some anomalies in its contents due to the concurrent writes. It may also incorrectly fail to report some anomalies that the concurrent writes prevented it from detecting. NOTE: This API is intended for exploratory use by humans. You should expect the request parameters and the response format to vary in future versions. NOTE: This API may not work correctly in a mixed-version cluster. The default values for the parameters of this API are designed to limit the impact of the integrity verification on other activities in your cluster. For instance, by default it will only use at most half of the `snapshot_meta` threads to verify the integrity of each snapshot, allowing other snapshot operations to use the other half of this thread pool. If you modify these parameters to speed up the verification process, you risk disrupting other snapshot-related operations in your cluster. For large repositories, consider setting up a separate single-node Elasticsearch cluster just for running the integrity verification API. The response exposes implementation details of the analysis which may change from version to version. The response body format is therefore not considered stable and may be different in newer versions. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-repository-verify-integrity | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-snapshot-repository-verify-integrity | Elasticsearch API documentation} */ async repositoryVerifyIntegrity (this: That, params: T.SnapshotRepositoryVerifyIntegrityRequest, options?: TransportRequestOptionsWithOutMeta): Promise async repositoryVerifyIntegrity (this: That, params: T.SnapshotRepositoryVerifyIntegrityRequest, options?: TransportRequestOptionsWithMeta): Promise> async repositoryVerifyIntegrity (this: That, params: T.SnapshotRepositoryVerifyIntegrityRequest, options?: TransportRequestOptions): Promise async repositoryVerifyIntegrity (this: That, params: T.SnapshotRepositoryVerifyIntegrityRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] + const { + path: acceptedPath + } = this.acceptedParams['snapshot.repository_verify_integrity'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -483,14 +715,18 @@ export default class Snapshot { /** * Restore a snapshot. Restore a snapshot of a cluster or data streams and indices. You can restore a snapshot only to a running cluster with an elected master node. The snapshot repository must be registered and available to the cluster. The snapshot and cluster versions must be compatible. To restore a snapshot, the cluster's global metadata must be writable. Ensure there aren't any cluster blocks that prevent writes. The restore operation ignores index blocks.
Before you restore a data stream, ensure the cluster contains a matching index template with data streams enabled. To check, use the index management feature in Kibana or the get index template API: ``` GET _index_template/*?filter_path=index_templates.name,index_templates.index_template.index_patterns,index_templates.index_template.data_stream ``` If no such template exists, you can create one or restore a cluster state that contains one. Without a matching index template, a data stream can't roll over or create backing indices. If your snapshot contains data from App Search or Workplace Search, you must restore the Enterprise Search encryption key before you restore the snapshot. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-restore | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-snapshot-restore | Elasticsearch API documentation} */ async restore (this: That, params: T.SnapshotRestoreRequest, options?: TransportRequestOptionsWithOutMeta): Promise async restore (this: That, params: T.SnapshotRestoreRequest, options?: TransportRequestOptionsWithMeta): Promise> async restore (this: That, params: T.SnapshotRestoreRequest, options?: TransportRequestOptions): Promise async restore (this: That, params: T.SnapshotRestoreRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['repository', 'snapshot'] - const acceptedBody: string[] = ['feature_states', 'ignore_index_settings', 'ignore_unavailable', 'include_aliases', 'include_global_state', 'index_settings', 'indices', 'partial', 'rename_pattern', 'rename_replacement'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['snapshot.restore'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -512,8 +748,14 @@ export default class Snapshot { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -531,13 +773,16 @@ export default class Snapshot { /** * Get the snapshot status. Get a detailed description of the current state for each shard participating in the snapshot. Note that this API should be used only to obtain detailed shard-level information for ongoing snapshots. If this detail is not needed or you want to obtain information about one or more existing snapshots, use the get snapshot API. If you omit the `` request path parameter, the request retrieves information only for currently running snapshots. This usage is preferred. If needed, you can specify `` and `` to retrieve information for specific snapshots, even if they're not currently running. WARNING: Using the API to return the status of any snapshots other than currently running snapshots can be expensive. The API requires a read from the repository for each shard in each snapshot. For example, if you have 100 snapshots with 1,000 shards each, an API request that includes all snapshots will require 100,000 reads (100 snapshots x 1,000 shards). Depending on the latency of your storage, such requests can take an extremely long time to return results. 
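// A sketch of restoring selected indices under new names with the restore method above; the
// repository, snapshot, and index pattern are hypothetical. rename_pattern/rename_replacement
// and the other fields shown come from acceptedParams['snapshot.restore'].body, while
// wait_for_completion is a query-string parameter.
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'https://localhost:9200/' })

await client.snapshot.restore({
  repository: 'my_repository',
  snapshot: 'snapshot-2025-01-01',
  indices: 'my-index-*',
  rename_pattern: '(.+)',
  rename_replacement: 'restored-$1',
  include_global_state: false,
  wait_for_completion: true
})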
These requests can also tax machine resources and, when using cloud storage, incur high processing costs. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-status | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-snapshot-status | Elasticsearch API documentation} */ async status (this: That, params?: T.SnapshotStatusRequest, options?: TransportRequestOptionsWithOutMeta): Promise async status (this: That, params?: T.SnapshotStatusRequest, options?: TransportRequestOptionsWithMeta): Promise> async status (this: That, params?: T.SnapshotStatusRequest, options?: TransportRequestOptions): Promise async status (this: That, params?: T.SnapshotStatusRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['repository', 'snapshot'] + const { + path: acceptedPath + } = this.acceptedParams['snapshot.status'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -585,13 +830,16 @@ export default class Snapshot { /** * Verify a snapshot repository. Check for common misconfigurations in a snapshot repository. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-snapshot-verify-repository | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-snapshot-verify-repository | Elasticsearch API documentation} */ async verifyRepository (this: That, params: T.SnapshotVerifyRepositoryRequest, options?: TransportRequestOptionsWithOutMeta): Promise async verifyRepository (this: That, params: T.SnapshotVerifyRepositoryRequest, options?: TransportRequestOptionsWithMeta): Promise> async verifyRepository (this: That, params: T.SnapshotVerifyRepositoryRequest, options?: TransportRequestOptions): Promise async verifyRepository (this: That, params: T.SnapshotVerifyRepositoryRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['name'] + const { + path: acceptedPath + } = this.acceptedParams['snapshot.verify_repository'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} diff --git a/src/api/api/sql.ts b/src/api/api/sql.ts index 871cb7139..b8be615f7 100644 --- a/src/api/api/sql.ts +++ b/src/api/api/sql.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. 
and contributors + * SPDX-License-Identifier: Apache-2.0 */ /* eslint-disable import/export */ @@ -35,24 +21,105 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -interface That { transport: Transport } + +interface That { + transport: Transport + acceptedParams: Record +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] export default class Sql { transport: Transport + acceptedParams: Record constructor (transport: Transport) { this.transport = transport + this.acceptedParams = { + 'sql.clear_cursor': { + path: [], + body: [ + 'cursor' + ], + query: [] + }, + 'sql.delete_async': { + path: [ + 'id' + ], + body: [], + query: [] + }, + 'sql.get_async': { + path: [ + 'id' + ], + body: [], + query: [ + 'delimiter', + 'format', + 'keep_alive', + 'wait_for_completion_timeout' + ] + }, + 'sql.get_async_status': { + path: [ + 'id' + ], + body: [], + query: [] + }, + 'sql.query': { + path: [], + body: [ + 'allow_partial_search_results', + 'catalog', + 'columnar', + 'cursor', + 'fetch_size', + 'field_multi_value_leniency', + 'filter', + 'index_using_frozen', + 'keep_alive', + 'keep_on_completion', + 'page_timeout', + 'params', + 'query', + 'request_timeout', + 'runtime_mappings', + 'time_zone', + 'wait_for_completion_timeout' + ], + query: [ + 'format' + ] + }, + 'sql.translate': { + path: [], + body: [ + 'fetch_size', + 'filter', + 'query', + 'time_zone' + ], + query: [] + } + } } /** * Clear an SQL search cursor. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-sql-clear-cursor | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-sql-clear-cursor | Elasticsearch API documentation} */ async clearCursor (this: That, params: T.SqlClearCursorRequest, options?: TransportRequestOptionsWithOutMeta): Promise async clearCursor (this: That, params: T.SqlClearCursorRequest, options?: TransportRequestOptionsWithMeta): Promise> async clearCursor (this: That, params: T.SqlClearCursorRequest, options?: TransportRequestOptions): Promise async clearCursor (this: That, params: T.SqlClearCursorRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const acceptedBody: string[] = ['cursor'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['sql.clear_cursor'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -74,8 +141,14 @@ export default class Sql { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -89,13 +162,16 @@ export default class Sql { /** * Delete an async SQL search. Delete an async SQL search or a stored synchronous SQL search. If the search is still running, the API cancels it. If the Elasticsearch security features are enabled, only the following users can use this API to delete a search: * Users with the `cancel_task` cluster privilege. * The user who first submitted the search. 
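// A sketch of cursor-based SQL paging and cleanup with the methods above; the index name and
// query are hypothetical. The `cursor` returned by each response is passed back to fetch the
// next page and finally released with clearCursor.
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'https://localhost:9200/' })

const first = await client.sql.query({
  query: 'SELECT client_ip, bytes FROM "web-logs" ORDER BY bytes DESC',
  fetch_size: 500
})

if (first.cursor != null) {
  const next = await client.sql.query({ cursor: first.cursor })
  console.log(next.rows.length)
  // Release server-side resources once the cursor is no longer needed.
  await client.sql.clearCursor({ cursor: next.cursor ?? first.cursor })
}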
- * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-sql-delete-async | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-sql-delete-async | Elasticsearch API documentation} */ async deleteAsync (this: That, params: T.SqlDeleteAsyncRequest, options?: TransportRequestOptionsWithOutMeta): Promise async deleteAsync (this: That, params: T.SqlDeleteAsyncRequest, options?: TransportRequestOptionsWithMeta): Promise> async deleteAsync (this: That, params: T.SqlDeleteAsyncRequest, options?: TransportRequestOptions): Promise async deleteAsync (this: That, params: T.SqlDeleteAsyncRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id'] + const { + path: acceptedPath + } = this.acceptedParams['sql.delete_async'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -131,13 +207,16 @@ export default class Sql { /** * Get async SQL search results. Get the current status and available results for an async SQL search or stored synchronous SQL search. If the Elasticsearch security features are enabled, only the user who first submitted the SQL search can retrieve the search using this API. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-sql-get-async | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-sql-get-async | Elasticsearch API documentation} */ async getAsync (this: That, params: T.SqlGetAsyncRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getAsync (this: That, params: T.SqlGetAsyncRequest, options?: TransportRequestOptionsWithMeta): Promise> async getAsync (this: That, params: T.SqlGetAsyncRequest, options?: TransportRequestOptions): Promise async getAsync (this: That, params: T.SqlGetAsyncRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id'] + const { + path: acceptedPath + } = this.acceptedParams['sql.get_async'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -173,13 +252,16 @@ export default class Sql { /** * Get the async SQL search status. Get the current status of an async SQL search or a stored synchronous SQL search. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-sql-get-async-status | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-sql-get-async-status | Elasticsearch API documentation} */ async getAsyncStatus (this: That, params: T.SqlGetAsyncStatusRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getAsyncStatus (this: That, params: T.SqlGetAsyncStatusRequest, options?: TransportRequestOptionsWithMeta): Promise> async getAsyncStatus (this: That, params: T.SqlGetAsyncStatusRequest, options?: TransportRequestOptions): Promise async getAsyncStatus (this: That, params: T.SqlGetAsyncStatusRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id'] + const { + path: acceptedPath + } = this.acceptedParams['sql.get_async_status'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -215,14 +297,18 @@ export default class Sql { /** * Get SQL search results. Run an SQL request. 
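// A sketch of the async SQL flow described above: submit a query that may outlive the initial
// request, poll its status, fetch the results, and clean up. The index name, query, and timeout
// values are hypothetical.
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'https://localhost:9200/' })

const submitted = await client.sql.query({
  query: 'SELECT client_ip, bytes FROM "web-logs" ORDER BY bytes DESC',
  fetch_size: 100,
  wait_for_completion_timeout: '2s',
  keep_on_completion: true
})

if (submitted.id != null && submitted.is_running === true) {
  const status = await client.sql.getAsyncStatus({ id: submitted.id })
  console.log(status.is_running, status.completion_status)

  const results = await client.sql.getAsync({ id: submitted.id, wait_for_completion_timeout: '30s' })
  console.log(results.rows)

  // Delete the stored search once the results have been consumed.
  await client.sql.deleteAsync({ id: submitted.id })
}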
- * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-sql-query | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-sql-query | Elasticsearch API documentation} */ async query (this: That, params?: T.SqlQueryRequest, options?: TransportRequestOptionsWithOutMeta): Promise async query (this: That, params?: T.SqlQueryRequest, options?: TransportRequestOptionsWithMeta): Promise> async query (this: That, params?: T.SqlQueryRequest, options?: TransportRequestOptions): Promise async query (this: That, params?: T.SqlQueryRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const acceptedBody: string[] = ['allow_partial_search_results', 'catalog', 'columnar', 'cursor', 'fetch_size', 'field_multi_value_leniency', 'filter', 'index_using_frozen', 'keep_alive', 'keep_on_completion', 'page_timeout', 'params', 'query', 'request_timeout', 'runtime_mappings', 'time_zone', 'wait_for_completion_timeout'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['sql.query'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -245,8 +331,14 @@ export default class Sql { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -260,14 +352,18 @@ export default class Sql { /** * Translate SQL into Elasticsearch queries. Translate an SQL search into a search API request containing Query DSL. It accepts the same request body parameters as the SQL search API, excluding `cursor`. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-sql-translate | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-sql-translate | Elasticsearch API documentation} */ async translate (this: That, params: T.SqlTranslateRequest, options?: TransportRequestOptionsWithOutMeta): Promise async translate (this: That, params: T.SqlTranslateRequest, options?: TransportRequestOptionsWithMeta): Promise> async translate (this: That, params: T.SqlTranslateRequest, options?: TransportRequestOptions): Promise async translate (this: That, params: T.SqlTranslateRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const acceptedBody: string[] = ['fetch_size', 'filter', 'query', 'time_zone'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['sql.translate'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -289,8 +385,14 @@ export default class Sql { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? 
{} + // @ts-expect-error + body[key] = params[key] + } } } diff --git a/src/api/api/ssl.ts b/src/api/api/ssl.ts index 29f25f090..272765a68 100644 --- a/src/api/api/ssl.ts +++ b/src/api/api/ssl.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 */ /* eslint-disable import/export */ @@ -35,23 +21,38 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -interface That { transport: Transport } + +interface That { + transport: Transport + acceptedParams: Record +} export default class Ssl { transport: Transport + acceptedParams: Record constructor (transport: Transport) { this.transport = transport + this.acceptedParams = { + 'ssl.certificates': { + path: [], + body: [], + query: [] + } + } } /** * Get SSL certificates. Get information about the X.509 certificates that are used to encrypt communications in the cluster. The API returns a list that includes certificates from all TLS contexts including: - Settings for transport and HTTP interfaces - TLS settings that are used within authentication realms - TLS settings for remote monitoring exporters The list includes certificates that are used for configuring trust, such as those configured in the `xpack.security.transport.ssl.truststore` and `xpack.security.transport.ssl.certificate_authorities` settings. It also includes certificates that are used for configuring server identity, such as `xpack.security.http.ssl.keystore` and `xpack.security.http.ssl.certificate settings`. The list does not include certificates that are sourced from the default SSL context of the Java Runtime Environment (JRE), even if those certificates are in use within Elasticsearch. NOTE: When a PKCS#11 token is configured as the truststore of the JRE, the API returns all the certificates that are included in the PKCS#11 token irrespective of whether these are used in the Elasticsearch TLS configuration. If Elasticsearch is configured to use a keystore or truststore, the API output includes all certificates in that store, even though some of the certificates might not be in active use within the cluster. 
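// A brief sketch of reading the cluster's TLS certificate inventory via the certificates
// endpoint above; it takes no required parameters, and the node URL is illustrative.
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'https://localhost:9200/' })

const certs = await client.ssl.certificates()
for (const cert of certs) {
  console.log(cert.path, cert.alias, cert.expiry)
}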
- * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ssl-certificates | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-ssl-certificates | Elasticsearch API documentation} */ async certificates (this: That, params?: T.SslCertificatesRequest, options?: TransportRequestOptionsWithOutMeta): Promise async certificates (this: That, params?: T.SslCertificatesRequest, options?: TransportRequestOptionsWithMeta): Promise> async certificates (this: That, params?: T.SslCertificatesRequest, options?: TransportRequestOptions): Promise async certificates (this: That, params?: T.SslCertificatesRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] + const { + path: acceptedPath + } = this.acceptedParams['ssl.certificates'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} diff --git a/src/api/api/synonyms.ts b/src/api/api/synonyms.ts index 379510816..99e4730a4 100644 --- a/src/api/api/synonyms.ts +++ b/src/api/api/synonyms.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 */ /* eslint-disable import/export */ @@ -35,23 +21,95 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -interface That { transport: Transport } + +interface That { + transport: Transport + acceptedParams: Record +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] export default class Synonyms { transport: Transport + acceptedParams: Record constructor (transport: Transport) { this.transport = transport + this.acceptedParams = { + 'synonyms.delete_synonym': { + path: [ + 'id' + ], + body: [], + query: [] + }, + 'synonyms.delete_synonym_rule': { + path: [ + 'set_id', + 'rule_id' + ], + body: [], + query: [] + }, + 'synonyms.get_synonym': { + path: [ + 'id' + ], + body: [], + query: [ + 'from', + 'size' + ] + }, + 'synonyms.get_synonym_rule': { + path: [ + 'set_id', + 'rule_id' + ], + body: [], + query: [] + }, + 'synonyms.get_synonyms_sets': { + path: [], + body: [], + query: [ + 'from', + 'size' + ] + }, + 'synonyms.put_synonym': { + path: [ + 'id' + ], + body: [ + 'synonyms_set' + ], + query: [] + }, + 'synonyms.put_synonym_rule': { + path: [ + 'set_id', + 'rule_id' + ], + body: [ + 'synonyms' + ], + query: [] + } + } } /** * Delete a synonym set. You can only delete a synonyms set that is not in use by any index analyzer. Synonyms sets can be used in synonym graph token filters and synonym token filters. These synonym filters can be used as part of search analyzers. 
Analyzers need to be loaded when an index is restored (such as when a node starts, or the index becomes open). Even if the analyzer is not used on any field mapping, it still needs to be loaded on the index recovery phase. If any analyzers cannot be loaded, the index becomes unavailable and the cluster status becomes red or yellow as index shards are not available. To prevent that, synonyms sets that are used in analyzers can't be deleted. A delete request in this case will return a 400 response code. To remove a synonyms set, you must first remove all indices that contain analyzers using it. You can migrate an index by creating a new index that does not contain the token filter with the synonyms set, and use the reindex API in order to copy over the index data. Once finished, you can delete the index. When the synonyms set is not used in analyzers, you will be able to delete it. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-synonyms-delete-synonym | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-synonyms-delete-synonym | Elasticsearch API documentation} */ async deleteSynonym (this: That, params: T.SynonymsDeleteSynonymRequest, options?: TransportRequestOptionsWithOutMeta): Promise async deleteSynonym (this: That, params: T.SynonymsDeleteSynonymRequest, options?: TransportRequestOptionsWithMeta): Promise> async deleteSynonym (this: That, params: T.SynonymsDeleteSynonymRequest, options?: TransportRequestOptions): Promise async deleteSynonym (this: That, params: T.SynonymsDeleteSynonymRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id'] + const { + path: acceptedPath + } = this.acceptedParams['synonyms.delete_synonym'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -87,13 +145,16 @@ export default class Synonyms { /** * Delete a synonym rule. Delete a synonym rule from a synonym set. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-synonyms-delete-synonym-rule | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-synonyms-delete-synonym-rule | Elasticsearch API documentation} */ async deleteSynonymRule (this: That, params: T.SynonymsDeleteSynonymRuleRequest, options?: TransportRequestOptionsWithOutMeta): Promise async deleteSynonymRule (this: That, params: T.SynonymsDeleteSynonymRuleRequest, options?: TransportRequestOptionsWithMeta): Promise> async deleteSynonymRule (this: That, params: T.SynonymsDeleteSynonymRuleRequest, options?: TransportRequestOptions): Promise async deleteSynonymRule (this: That, params: T.SynonymsDeleteSynonymRuleRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['set_id', 'rule_id'] + const { + path: acceptedPath + } = this.acceptedParams['synonyms.delete_synonym_rule'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -130,13 +191,16 @@ export default class Synonyms { /** * Get a synonym set. 
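Editor's note: with the `synonyms.*` tables above in place, paging options ride on the querystring while rule definitions become the request body. A hedged usage sketch; the connection details and set name are placeholders.

```ts
import { Client } from '@elastic/elasticsearch'

// Placeholder connection details.
const client = new Client({ node: 'http://localhost:9200/' })

async function example (): Promise<void> {
  // 'id' is a path parameter; 'from' and 'size' are listed under query for
  // synonyms.get_synonym, so they travel on the querystring.
  const page = await client.synonyms.getSynonym({ id: 'my-synonym-set', from: 0, size: 20 })
  console.log(page)

  // 'synonyms_set' is the only accepted body key for synonyms.put_synonym,
  // so it is serialized as the request body.
  await client.synonyms.putSynonym({
    id: 'my-synonym-set',
    synonyms_set: [{ id: 'rule-1', synonyms: 'hello, hi, howdy' }]
  })
}

example().catch(console.log)
```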
- * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-synonyms-get-synonym | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-synonyms-get-synonym | Elasticsearch API documentation} */ async getSynonym (this: That, params: T.SynonymsGetSynonymRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getSynonym (this: That, params: T.SynonymsGetSynonymRequest, options?: TransportRequestOptionsWithMeta): Promise> async getSynonym (this: That, params: T.SynonymsGetSynonymRequest, options?: TransportRequestOptions): Promise async getSynonym (this: That, params: T.SynonymsGetSynonymRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id'] + const { + path: acceptedPath + } = this.acceptedParams['synonyms.get_synonym'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -172,13 +236,16 @@ export default class Synonyms { /** * Get a synonym rule. Get a synonym rule from a synonym set. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-synonyms-get-synonym-rule | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-synonyms-get-synonym-rule | Elasticsearch API documentation} */ async getSynonymRule (this: That, params: T.SynonymsGetSynonymRuleRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getSynonymRule (this: That, params: T.SynonymsGetSynonymRuleRequest, options?: TransportRequestOptionsWithMeta): Promise> async getSynonymRule (this: That, params: T.SynonymsGetSynonymRuleRequest, options?: TransportRequestOptions): Promise async getSynonymRule (this: That, params: T.SynonymsGetSynonymRuleRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['set_id', 'rule_id'] + const { + path: acceptedPath + } = this.acceptedParams['synonyms.get_synonym_rule'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -215,13 +282,16 @@ export default class Synonyms { /** * Get all synonym sets. Get a summary of all defined synonym sets. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-synonyms-get-synonym | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-synonyms-get-synonym | Elasticsearch API documentation} */ async getSynonymsSets (this: That, params?: T.SynonymsGetSynonymsSetsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getSynonymsSets (this: That, params?: T.SynonymsGetSynonymsSetsRequest, options?: TransportRequestOptionsWithMeta): Promise> async getSynonymsSets (this: That, params?: T.SynonymsGetSynonymsSetsRequest, options?: TransportRequestOptions): Promise async getSynonymsSets (this: That, params?: T.SynonymsGetSynonymsSetsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] + const { + path: acceptedPath + } = this.acceptedParams['synonyms.get_synonyms_sets'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -255,14 +325,18 @@ export default class Synonyms { /** * Create or update a synonym set. Synonyms sets are limited to a maximum of 10,000 synonym rules per set. 
If you need to manage more synonym rules, you can create multiple synonym sets. When an existing synonyms set is updated, the search analyzers that use the synonyms set are reloaded automatically for all indices. This is equivalent to invoking the reload search analyzers API for all indices that use the synonyms set. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-synonyms-put-synonym | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-synonyms-put-synonym | Elasticsearch API documentation} */ async putSynonym (this: That, params: T.SynonymsPutSynonymRequest, options?: TransportRequestOptionsWithOutMeta): Promise async putSynonym (this: That, params: T.SynonymsPutSynonymRequest, options?: TransportRequestOptionsWithMeta): Promise> async putSynonym (this: That, params: T.SynonymsPutSynonymRequest, options?: TransportRequestOptions): Promise async putSynonym (this: That, params: T.SynonymsPutSynonymRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id'] - const acceptedBody: string[] = ['synonyms_set'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['synonyms.put_synonym'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -284,8 +358,14 @@ export default class Synonyms { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -302,14 +382,18 @@ export default class Synonyms { /** * Create or update a synonym rule. Create or update a synonym rule in a synonym set. If any of the synonym rules included is invalid, the API returns an error. When you update a synonym rule, all analyzers using the synonyms set will be reloaded automatically to reflect the new rule. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-synonyms-put-synonym-rule | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-synonyms-put-synonym-rule | Elasticsearch API documentation} */ async putSynonymRule (this: That, params: T.SynonymsPutSynonymRuleRequest, options?: TransportRequestOptionsWithOutMeta): Promise async putSynonymRule (this: That, params: T.SynonymsPutSynonymRuleRequest, options?: TransportRequestOptionsWithMeta): Promise> async putSynonymRule (this: That, params: T.SynonymsPutSynonymRuleRequest, options?: TransportRequestOptions): Promise async putSynonymRule (this: That, params: T.SynonymsPutSynonymRuleRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['set_id', 'rule_id'] - const acceptedBody: string[] = ['synonyms'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['synonyms.put_synonym_rule'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? 
{ ...userQuery } : {} @@ -331,8 +415,14 @@ export default class Synonyms { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } diff --git a/src/api/api/tasks.ts b/src/api/api/tasks.ts index a8f7ccf20..e7afc408a 100644 --- a/src/api/api/tasks.ts +++ b/src/api/api/tasks.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 */ /* eslint-disable import/export */ @@ -35,23 +21,68 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -interface That { transport: Transport } + +interface That { + transport: Transport + acceptedParams: Record +} export default class Tasks { transport: Transport + acceptedParams: Record constructor (transport: Transport) { this.transport = transport + this.acceptedParams = { + 'tasks.cancel': { + path: [ + 'task_id' + ], + body: [], + query: [ + 'actions', + 'nodes', + 'parent_task_id', + 'wait_for_completion' + ] + }, + 'tasks.get': { + path: [ + 'task_id' + ], + body: [], + query: [ + 'timeout', + 'wait_for_completion' + ] + }, + 'tasks.list': { + path: [], + body: [], + query: [ + 'actions', + 'detailed', + 'group_by', + 'nodes', + 'parent_task_id', + 'timeout', + 'wait_for_completion' + ] + } + } } /** * Cancel a task. WARNING: The task management API is new and should still be considered a beta feature. The API may change in ways that are not backwards compatible. A task may continue to run for some time after it has been cancelled because it may not be able to safely stop its current activity straight away. It is also possible that Elasticsearch must complete its work on other tasks before it can process the cancellation. The get task information API will continue to list these cancelled tasks until they complete. The cancelled flag in the response indicates that the cancellation command has been processed and the task will stop as soon as possible. To troubleshoot why a cancelled task does not complete promptly, use the get task information API with the `?detailed` parameter to identify the other tasks the system is running. You can also use the node hot threads API to obtain detailed information about the work the system is doing instead of completing the cancelled task. 
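Editor's note: the task APIs declare no body keys at all, so every non-path option ends up on the querystring under the new tables. A hedged usage sketch; the node address and task ID are placeholders.

```ts
import { Client } from '@elastic/elasticsearch'

// Placeholder connection details.
const client = new Client({ node: 'http://localhost:9200/' })

async function example (): Promise<void> {
  // tasks.cancel: 'task_id' builds the URL; 'wait_for_completion' is one of the
  // declared query keys, so it is sent as a query parameter.
  await client.tasks.cancel({ task_id: 'oTUltX4IQMOUUVeiohTt8A:12345', wait_for_completion: true })

  // tasks.list declares no path or body keys, so every option here ends up on
  // the querystring.
  const tasks = await client.tasks.list({ detailed: true, group_by: 'parents', actions: '*search' })
  console.log(tasks)
}

example().catch(console.log)
```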
- * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-tasks | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/group/endpoint-tasks | Elasticsearch API documentation} */ async cancel (this: That, params?: T.TasksCancelRequest, options?: TransportRequestOptionsWithOutMeta): Promise async cancel (this: That, params?: T.TasksCancelRequest, options?: TransportRequestOptionsWithMeta): Promise> async cancel (this: That, params?: T.TasksCancelRequest, options?: TransportRequestOptions): Promise async cancel (this: That, params?: T.TasksCancelRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['task_id'] + const { + path: acceptedPath + } = this.acceptedParams['tasks.cancel'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -95,13 +126,16 @@ export default class Tasks { /** * Get task information. Get information about a task currently running in the cluster. WARNING: The task management API is new and should still be considered a beta feature. The API may change in ways that are not backwards compatible. If the task identifier is not found, a 404 response code indicates that there are no resources that match the request. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-tasks | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/group/endpoint-tasks | Elasticsearch API documentation} */ async get (this: That, params: T.TasksGetRequest, options?: TransportRequestOptionsWithOutMeta): Promise async get (this: That, params: T.TasksGetRequest, options?: TransportRequestOptionsWithMeta): Promise> async get (this: That, params: T.TasksGetRequest, options?: TransportRequestOptions): Promise async get (this: That, params: T.TasksGetRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['task_id'] + const { + path: acceptedPath + } = this.acceptedParams['tasks.get'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -137,13 +171,16 @@ export default class Tasks { /** * Get all tasks. Get information about the tasks currently running on one or more nodes in the cluster. WARNING: The task management API is new and should still be considered a beta feature. The API may change in ways that are not backwards compatible. **Identifying running tasks** The `X-Opaque-Id header`, when provided on the HTTP request header, is going to be returned as a header in the response as well as in the headers field for in the task information. This enables you to track certain calls or associate certain tasks with the client that started them. 
For example: ``` curl -i -H "X-Opaque-Id: 123456" "http://localhost:9200/_tasks?group_by=parents" ``` The API returns the following result: ``` HTTP/1.1 200 OK X-Opaque-Id: 123456 content-type: application/json; charset=UTF-8 content-length: 831 { "tasks" : { "u5lcZHqcQhu-rUoFaqDphA:45" : { "node" : "u5lcZHqcQhu-rUoFaqDphA", "id" : 45, "type" : "transport", "action" : "cluster:monitor/tasks/lists", "start_time_in_millis" : 1513823752749, "running_time_in_nanos" : 293139, "cancellable" : false, "headers" : { "X-Opaque-Id" : "123456" }, "children" : [ { "node" : "u5lcZHqcQhu-rUoFaqDphA", "id" : 46, "type" : "direct", "action" : "cluster:monitor/tasks/lists[n]", "start_time_in_millis" : 1513823752750, "running_time_in_nanos" : 92133, "cancellable" : false, "parent_task_id" : "u5lcZHqcQhu-rUoFaqDphA:45", "headers" : { "X-Opaque-Id" : "123456" } } ] } } } ``` In this example, `X-Opaque-Id: 123456` is the ID as a part of the response header. The `X-Opaque-Id` in the task `headers` is the ID for the task that was initiated by the REST request. The `X-Opaque-Id` in the children `headers` is the child task of the task that was initiated by the REST request. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-tasks | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/group/endpoint-tasks | Elasticsearch API documentation} */ async list (this: That, params?: T.TasksListRequest, options?: TransportRequestOptionsWithOutMeta): Promise async list (this: That, params?: T.TasksListRequest, options?: TransportRequestOptionsWithMeta): Promise> async list (this: That, params?: T.TasksListRequest, options?: TransportRequestOptions): Promise async list (this: That, params?: T.TasksListRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] + const { + path: acceptedPath + } = this.acceptedParams['tasks.list'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} diff --git a/src/api/api/terms_enum.ts b/src/api/api/terms_enum.ts index ad9fa1e0e..2c2359baa 100644 --- a/src/api/api/terms_enum.ts +++ b/src/api/api/terms_enum.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. 
and contributors + * SPDX-License-Identifier: Apache-2.0 */ /* eslint-disable import/export */ @@ -35,18 +21,45 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -interface That { transport: Transport } + +interface That { + transport: Transport +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] + +const acceptedParams: Record = { + terms_enum: { + path: [ + 'index' + ], + body: [ + 'field', + 'size', + 'timeout', + 'case_insensitive', + 'index_filter', + 'string', + 'search_after' + ], + query: [] + } +} /** * Get terms in an index. Discover terms that match a partial string in an index. This API is designed for low-latency look-ups used in auto-complete scenarios. > info > The terms enum API may return terms from deleted documents. Deleted documents are initially only marked as deleted. It is not until their segments are merged that documents are actually deleted. Until that happens, the terms enum API will return terms from these documents. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-terms-enum | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-terms-enum | Elasticsearch API documentation} */ export default async function TermsEnumApi (this: That, params: T.TermsEnumRequest, options?: TransportRequestOptionsWithOutMeta): Promise export default async function TermsEnumApi (this: That, params: T.TermsEnumRequest, options?: TransportRequestOptionsWithMeta): Promise> export default async function TermsEnumApi (this: That, params: T.TermsEnumRequest, options?: TransportRequestOptions): Promise export default async function TermsEnumApi (this: That, params: T.TermsEnumRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] - const acceptedBody: string[] = ['field', 'size', 'timeout', 'case_insensitive', 'index_filter', 'string', 'search_after'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = acceptedParams.terms_enum + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -68,8 +81,14 @@ export default async function TermsEnumApi (this: That, params: T.TermsEnumReque } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } diff --git a/src/api/api/termvectors.ts b/src/api/api/termvectors.ts index c3f461487..0e3205a86 100644 --- a/src/api/api/termvectors.ts +++ b/src/api/api/termvectors.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 */ /* eslint-disable import/export */ @@ -35,18 +21,63 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -interface That { transport: Transport } + +interface That { + transport: Transport +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] + +const acceptedParams: Record = { + termvectors: { + path: [ + 'index', + 'id' + ], + body: [ + 'doc', + 'filter', + 'per_field_analyzer', + 'fields', + 'field_statistics', + 'offsets', + 'payloads', + 'positions', + 'term_statistics', + 'routing', + 'version', + 'version_type' + ], + query: [ + 'fields', + 'field_statistics', + 'offsets', + 'payloads', + 'positions', + 'preference', + 'realtime', + 'routing', + 'term_statistics', + 'version', + 'version_type' + ] + } +} /** * Get term vector information. Get information and statistics about terms in the fields of a particular document. You can retrieve term vectors for documents stored in the index or for artificial documents passed in the body of the request. You can specify the fields you are interested in through the `fields` parameter or by adding the fields to the request body. For example: ``` GET /my-index-000001/_termvectors/1?fields=message ``` Fields can be specified using wildcards, similar to the multi match query. Term vectors are real-time by default, not near real-time. This can be changed by setting `realtime` parameter to `false`. You can request three types of values: _term information_, _term statistics_, and _field statistics_. By default, all term information and field statistics are returned for all fields but term statistics are excluded. **Term information** * term frequency in the field (always returned) * term positions (`positions: true`) * start and end offsets (`offsets: true`) * term payloads (`payloads: true`), as base64 encoded bytes If the requested information wasn't stored in the index, it will be computed on the fly if possible. Additionally, term vectors could be computed for documents not even existing in the index, but instead provided by the user. > warn > Start and end offsets assume UTF-16 encoding is being used. If you want to use these offsets in order to get the original text that produced this token, you should make sure that the string you are taking a sub-string of is also encoded using UTF-16. **Behaviour** The term and field statistics are not accurate. Deleted documents are not taken into account. The information is only retrieved for the shard the requested document resides in. The term and field statistics are therefore only useful as relative measures whereas the absolute numbers have no meaning in this context. By default, when requesting term vectors of artificial documents, a shard to get the statistics from is randomly selected. Use `routing` only to hit a particular shard. 
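Editor's note: under the `termvectors` table above, keys listed under `body` win even when they also appear in the query list (the routing loop checks `acceptedBody` first), while query-only keys such as `realtime` stay on the URL. An illustrative call, assuming a local cluster and the sample index name from the description.

```ts
import { Client } from '@elastic/elasticsearch'

// Assumes a local cluster; the index and document ID are illustrative.
const client = new Client({ node: 'http://localhost:9200/' })

async function example (): Promise<void> {
  // 'index' and 'id' become part of the URL. 'fields', 'positions' and 'offsets'
  // are accepted body keys, so they are serialized into the request body;
  // 'realtime' is query-only and stays on the querystring.
  const vectors = await client.termvectors({
    index: 'my-index-000001',
    id: '1',
    fields: ['message'],
    positions: true,
    offsets: true,
    realtime: false
  })
  console.log(vectors.term_vectors)
}

example().catch(console.log)
```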
- * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-termvectors | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-termvectors | Elasticsearch API documentation} */ export default async function TermvectorsApi (this: That, params: T.TermvectorsRequest, options?: TransportRequestOptionsWithOutMeta): Promise export default async function TermvectorsApi (this: That, params: T.TermvectorsRequest, options?: TransportRequestOptionsWithMeta): Promise> export default async function TermvectorsApi (this: That, params: T.TermvectorsRequest, options?: TransportRequestOptions): Promise export default async function TermvectorsApi (this: That, params: T.TermvectorsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index', 'id'] - const acceptedBody: string[] = ['doc', 'filter', 'per_field_analyzer'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = acceptedParams.termvectors + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -68,8 +99,14 @@ export default async function TermvectorsApi (this: That, p } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } diff --git a/src/api/api/text_structure.ts b/src/api/api/text_structure.ts index fd245e577..72521758a 100644 --- a/src/api/api/text_structure.ts +++ b/src/api/api/text_structure.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. 
and contributors + * SPDX-License-Identifier: Apache-2.0 */ /* eslint-disable import/export */ @@ -35,23 +21,107 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -interface That { transport: Transport } + +interface That { + transport: Transport + acceptedParams: Record +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] export default class TextStructure { transport: Transport + acceptedParams: Record constructor (transport: Transport) { this.transport = transport + this.acceptedParams = { + 'text_structure.find_field_structure': { + path: [], + body: [], + query: [ + 'column_names', + 'delimiter', + 'documents_to_sample', + 'ecs_compatibility', + 'explain', + 'field', + 'format', + 'grok_pattern', + 'index', + 'quote', + 'should_trim_fields', + 'timeout', + 'timestamp_field', + 'timestamp_format' + ] + }, + 'text_structure.find_message_structure': { + path: [], + body: [ + 'messages' + ], + query: [ + 'column_names', + 'delimiter', + 'ecs_compatibility', + 'explain', + 'format', + 'grok_pattern', + 'quote', + 'should_trim_fields', + 'timeout', + 'timestamp_field', + 'timestamp_format' + ] + }, + 'text_structure.find_structure': { + path: [], + body: [ + 'text_files' + ], + query: [ + 'charset', + 'column_names', + 'delimiter', + 'ecs_compatibility', + 'explain', + 'format', + 'grok_pattern', + 'has_header_row', + 'line_merge_size_limit', + 'lines_to_sample', + 'quote', + 'should_trim_fields', + 'timeout', + 'timestamp_field', + 'timestamp_format' + ] + }, + 'text_structure.test_grok_pattern': { + path: [], + body: [ + 'grok_pattern', + 'text' + ], + query: [ + 'ecs_compatibility' + ] + } + } } /** * Find the structure of a text field. Find the structure of a text field in an Elasticsearch index. This API provides a starting point for extracting further information from log messages already ingested into Elasticsearch. For example, if you have ingested data into a very simple index that has just `@timestamp` and message fields, you can use this API to see what common structure exists in the message field. The response from the API contains: * Sample messages. * Statistics that reveal the most common values for all fields detected within the text and basic numeric statistics for numeric fields. * Information about the structure of the text, which is useful when you write ingest configurations to index it or similarly formatted text. * Appropriate mappings for an Elasticsearch index, which you could use to ingest the text. All this information can be calculated by the structure finder with no guidance. However, you can optionally override some of the decisions about the text structure by specifying one or more query parameters. If the structure finder produces unexpected results, specify the `explain` query parameter and an explanation will appear in the response. It helps determine why the returned structure was chosen. 
- * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-text_structure | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/group/endpoint-text_structure | Elasticsearch API documentation} */ async findFieldStructure (this: That, params: T.TextStructureFindFieldStructureRequest, options?: TransportRequestOptionsWithOutMeta): Promise async findFieldStructure (this: That, params: T.TextStructureFindFieldStructureRequest, options?: TransportRequestOptionsWithMeta): Promise> async findFieldStructure (this: That, params: T.TextStructureFindFieldStructureRequest, options?: TransportRequestOptions): Promise async findFieldStructure (this: That, params: T.TextStructureFindFieldStructureRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] + const { + path: acceptedPath + } = this.acceptedParams['text_structure.find_field_structure'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -84,14 +154,18 @@ export default class TextStructure { /** * Find the structure of text messages. Find the structure of a list of text messages. The messages must contain data that is suitable to be ingested into Elasticsearch. This API provides a starting point for ingesting data into Elasticsearch in a format that is suitable for subsequent use with other Elastic Stack functionality. Use this API rather than the find text structure API if your input text has already been split up into separate messages by some other process. The response from the API contains: * Sample messages. * Statistics that reveal the most common values for all fields detected within the text and basic numeric statistics for numeric fields. * Information about the structure of the text, which is useful when you write ingest configurations to index it or similarly formatted text. Appropriate mappings for an Elasticsearch index, which you could use to ingest the text. All this information can be calculated by the structure finder with no guidance. However, you can optionally override some of the decisions about the text structure by specifying one or more query parameters. If the structure finder produces unexpected results, specify the `explain` query parameter and an explanation will appear in the response. It helps determine why the returned structure was chosen. 
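Editor's note: the two structure-finder endpoints above split their options differently: `find_field_structure` has no body keys, whereas `find_message_structure` sends `messages` as the body. A sketch with a made-up index name and log lines.

```ts
import { Client } from '@elastic/elasticsearch'

// The index name and log lines below are made up.
const client = new Client({ node: 'http://localhost:9200/' })

async function example (): Promise<void> {
  // find_field_structure declares no body keys, so 'index', 'field' and any
  // tuning options are all sent as query parameters.
  await client.textStructure.findFieldStructure({ index: 'my-index-000001', field: 'message' })

  // find_message_structure sends 'messages' as the request body, while options
  // such as 'ecs_compatibility' stay on the querystring.
  await client.textStructure.findMessageStructure({
    messages: [
      '[2024-03-05T10:52:36] first message',
      '[2024-03-05T10:52:41] second message'
    ],
    ecs_compatibility: 'v1'
  })
}

example().catch(console.log)
```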
- * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-text-structure-find-message-structure | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-text-structure-find-message-structure | Elasticsearch API documentation} */ async findMessageStructure (this: That, params: T.TextStructureFindMessageStructureRequest, options?: TransportRequestOptionsWithOutMeta): Promise async findMessageStructure (this: That, params: T.TextStructureFindMessageStructureRequest, options?: TransportRequestOptionsWithMeta): Promise> async findMessageStructure (this: That, params: T.TextStructureFindMessageStructureRequest, options?: TransportRequestOptions): Promise async findMessageStructure (this: That, params: T.TextStructureFindMessageStructureRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const acceptedBody: string[] = ['messages'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['text_structure.find_message_structure'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -113,8 +187,14 @@ export default class TextStructure { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -128,14 +208,18 @@ export default class TextStructure { /** * Find the structure of a text file. The text file must contain data that is suitable to be ingested into Elasticsearch. This API provides a starting point for ingesting data into Elasticsearch in a format that is suitable for subsequent use with other Elastic Stack functionality. Unlike other Elasticsearch endpoints, the data that is posted to this endpoint does not need to be UTF-8 encoded and in JSON format. It must, however, be text; binary text formats are not currently supported. The size is limited to the Elasticsearch HTTP receive buffer size, which defaults to 100 Mb. The response from the API contains: * A couple of messages from the beginning of the text. * Statistics that reveal the most common values for all fields detected within the text and basic numeric statistics for numeric fields. * Information about the structure of the text, which is useful when you write ingest configurations to index it or similarly formatted text. * Appropriate mappings for an Elasticsearch index, which you could use to ingest the text. All this information can be calculated by the structure finder with no guidance. However, you can optionally override some of the decisions about the text structure by specifying one or more query parameters. 
- * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-text-structure-find-structure | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-text-structure-find-structure | Elasticsearch API documentation} */ async findStructure (this: That, params: T.TextStructureFindStructureRequest, options?: TransportRequestOptionsWithOutMeta): Promise async findStructure (this: That, params: T.TextStructureFindStructureRequest, options?: TransportRequestOptionsWithMeta): Promise> async findStructure (this: That, params: T.TextStructureFindStructureRequest, options?: TransportRequestOptions): Promise async findStructure (this: That, params: T.TextStructureFindStructureRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const acceptedBody: string[] = ['text_files'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['text_structure.find_structure'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -147,8 +231,14 @@ export default class TextStructure { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -162,14 +252,18 @@ export default class TextStructure { /** * Test a Grok pattern. Test a Grok pattern on one or more lines of text. The API indicates whether the lines match the pattern together with the offsets and lengths of the matched substrings. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-text-structure-test-grok-pattern | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-text-structure-test-grok-pattern | Elasticsearch API documentation} */ async testGrokPattern (this: That, params: T.TextStructureTestGrokPatternRequest, options?: TransportRequestOptionsWithOutMeta): Promise async testGrokPattern (this: That, params: T.TextStructureTestGrokPatternRequest, options?: TransportRequestOptionsWithMeta): Promise> async testGrokPattern (this: That, params: T.TextStructureTestGrokPatternRequest, options?: TransportRequestOptions): Promise async testGrokPattern (this: That, params: T.TextStructureTestGrokPatternRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const acceptedBody: string[] = ['grok_pattern', 'text'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['text_structure.test_grok_pattern'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -191,8 +285,14 @@ export default class TextStructure { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? 
{} + // @ts-expect-error + body[key] = params[key] + } } } diff --git a/src/api/api/transform.ts b/src/api/api/transform.ts index 4872de3e1..d09a65d5b 100644 --- a/src/api/api/transform.ts +++ b/src/api/api/transform.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 */ /* eslint-disable import/export */ @@ -35,23 +21,184 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -interface That { transport: Transport } + +interface That { + transport: Transport + acceptedParams: Record +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] export default class Transform { transport: Transport + acceptedParams: Record constructor (transport: Transport) { this.transport = transport + this.acceptedParams = { + 'transform.delete_transform': { + path: [ + 'transform_id' + ], + body: [], + query: [ + 'force', + 'delete_dest_index', + 'timeout' + ] + }, + 'transform.get_node_stats': { + path: [], + body: [], + query: [] + }, + 'transform.get_transform': { + path: [ + 'transform_id' + ], + body: [], + query: [ + 'allow_no_match', + 'from', + 'size', + 'exclude_generated' + ] + }, + 'transform.get_transform_stats': { + path: [ + 'transform_id' + ], + body: [], + query: [ + 'allow_no_match', + 'from', + 'size', + 'timeout' + ] + }, + 'transform.preview_transform': { + path: [ + 'transform_id' + ], + body: [ + 'dest', + 'description', + 'frequency', + 'pivot', + 'source', + 'settings', + 'sync', + 'retention_policy', + 'latest' + ], + query: [ + 'timeout' + ] + }, + 'transform.put_transform': { + path: [ + 'transform_id' + ], + body: [ + 'dest', + 'description', + 'frequency', + 'latest', + '_meta', + 'pivot', + 'retention_policy', + 'settings', + 'source', + 'sync' + ], + query: [ + 'defer_validation', + 'timeout' + ] + }, + 'transform.reset_transform': { + path: [ + 'transform_id' + ], + body: [], + query: [ + 'force', + 'timeout' + ] + }, + 'transform.schedule_now_transform': { + path: [ + 'transform_id' + ], + body: [], + query: [ + 'timeout' + ] + }, + 'transform.start_transform': { + path: [ + 'transform_id' + ], + body: [], + query: [ + 'timeout', + 'from' + ] + }, + 'transform.stop_transform': { + path: [ + 'transform_id' + ], + body: [], + query: [ + 'allow_no_match', + 'force', + 'timeout', + 'wait_for_checkpoint', + 'wait_for_completion' + ] + }, + 'transform.update_transform': { + path: [ + 'transform_id' + ], + body: [ + 'dest', + 'description', + 'frequency', + '_meta', + 'source', + 'settings', + 'sync', + 'retention_policy' + ], + query: [ + 'defer_validation', + 'timeout' + ] + }, + 'transform.upgrade_transforms': { + path: [], + body: [], + query: [ + 'dry_run', + 'timeout' + ] + 
} + } } /** * Delete a transform. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-transform-delete-transform | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-transform-delete-transform | Elasticsearch API documentation} */ async deleteTransform (this: That, params: T.TransformDeleteTransformRequest, options?: TransportRequestOptionsWithOutMeta): Promise async deleteTransform (this: That, params: T.TransformDeleteTransformRequest, options?: TransportRequestOptionsWithMeta): Promise> async deleteTransform (this: That, params: T.TransformDeleteTransformRequest, options?: TransportRequestOptions): Promise async deleteTransform (this: That, params: T.TransformDeleteTransformRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['transform_id'] + const { + path: acceptedPath + } = this.acceptedParams['transform.delete_transform'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -87,13 +234,16 @@ export default class Transform { /** * Retrieves transform usage information for transform nodes. - * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/get-transform-node-stats.html | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/9.0/get-transform-node-stats.html | Elasticsearch API documentation} */ async getNodeStats (this: That, params?: T.TODO, options?: TransportRequestOptionsWithOutMeta): Promise async getNodeStats (this: That, params?: T.TODO, options?: TransportRequestOptionsWithMeta): Promise> async getNodeStats (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise async getNodeStats (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] + const { + path: acceptedPath + } = this.acceptedParams['transform.get_node_stats'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -126,13 +276,16 @@ export default class Transform { /** * Get transforms. Get configuration information for transforms. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-transform-get-transform | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-transform-get-transform | Elasticsearch API documentation} */ async getTransform (this: That, params?: T.TransformGetTransformRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getTransform (this: That, params?: T.TransformGetTransformRequest, options?: TransportRequestOptionsWithMeta): Promise> async getTransform (this: That, params?: T.TransformGetTransformRequest, options?: TransportRequestOptions): Promise async getTransform (this: That, params?: T.TransformGetTransformRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['transform_id'] + const { + path: acceptedPath + } = this.acceptedParams['transform.get_transform'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -176,13 +329,16 @@ export default class Transform { /** * Get transform stats. Get usage information for transforms. 
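Editor's note: given the `transform.*` tables just listed, a `put_transform` call keeps `defer_validation` and `timeout` on the querystring and serializes everything else into the body. An illustrative request; the transform and index names are made up, loosely modeled on the Kibana sample data.

```ts
import { Client } from '@elastic/elasticsearch'

// Placeholder connection details and names.
const client = new Client({ node: 'http://localhost:9200/' })

async function example (): Promise<void> {
  // 'transform_id' is the path parameter; 'defer_validation' is one of the two
  // declared query keys; 'source', 'dest' and 'pivot' are body keys and are
  // serialized into the request body.
  await client.transform.putTransform({
    transform_id: 'ecommerce-customer-transform',
    source: { index: ['kibana_sample_data_ecommerce'] },
    dest: { index: 'ecommerce-customers' },
    pivot: {
      group_by: { customer_id: { terms: { field: 'customer_id' } } },
      aggregations: { total_spend: { sum: { field: 'taxful_total_price' } } }
    },
    defer_validation: true
  })
}

example().catch(console.log)
```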
- * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-transform-get-transform-stats | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-transform-get-transform-stats | Elasticsearch API documentation} */ async getTransformStats (this: That, params: T.TransformGetTransformStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getTransformStats (this: That, params: T.TransformGetTransformStatsRequest, options?: TransportRequestOptionsWithMeta): Promise> async getTransformStats (this: That, params: T.TransformGetTransformStatsRequest, options?: TransportRequestOptions): Promise async getTransformStats (this: That, params: T.TransformGetTransformStatsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['transform_id'] + const { + path: acceptedPath + } = this.acceptedParams['transform.get_transform_stats'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -218,14 +374,18 @@ export default class Transform { /** * Preview a transform. Generates a preview of the results that you will get when you create a transform with the same configuration. It returns a maximum of 100 results. The calculations are based on all the current data in the source index. It also generates a list of mappings and settings for the destination index. These values are determined based on the field types of the source index and the transform aggregations. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-transform-preview-transform | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-transform-preview-transform | Elasticsearch API documentation} */ async previewTransform (this: That, params?: T.TransformPreviewTransformRequest, options?: TransportRequestOptionsWithOutMeta): Promise> async previewTransform (this: That, params?: T.TransformPreviewTransformRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>> async previewTransform (this: That, params?: T.TransformPreviewTransformRequest, options?: TransportRequestOptions): Promise> async previewTransform (this: That, params?: T.TransformPreviewTransformRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['transform_id'] - const acceptedBody: string[] = ['dest', 'description', 'frequency', 'pivot', 'source', 'settings', 'sync', 'retention_policy', 'latest'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['transform.preview_transform'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -248,8 +408,14 @@ export default class Transform { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -273,14 +439,18 @@ export default class Transform { /** * Create a transform. Creates a transform. A transform copies data from source indices, transforms it, and persists it into an entity-centric destination index. 
You can also think of the destination index as a two-dimensional tabular data structure (known as a data frame). The ID for each document in the data frame is generated from a hash of the entity, so there is a unique row per entity. You must choose either the latest or pivot method for your transform; you cannot use both in a single transform. If you choose to use the pivot method for your transform, the entities are defined by the set of `group_by` fields in the pivot object. If you choose to use the latest method, the entities are defined by the `unique_key` field values in the latest object. You must have `create_index`, `index`, and `read` privileges on the destination index and `read` and `view_index_metadata` privileges on the source indices. When Elasticsearch security features are enabled, the transform remembers which roles the user that created it had at the time of creation and uses those same roles. If those roles do not have the required privileges on the source and destination indices, the transform fails when it attempts unauthorized operations. NOTE: You must use Kibana or this API to create a transform. Do not add a transform directly into any `.transform-internal*` indices using the Elasticsearch index API. If Elasticsearch security features are enabled, do not give users any privileges on `.transform-internal*` indices. If you used transforms prior to 7.5, also do not give users any privileges on `.data-frame-internal*` indices. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-transform-put-transform | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-transform-put-transform | Elasticsearch API documentation} */ async putTransform (this: That, params: T.TransformPutTransformRequest, options?: TransportRequestOptionsWithOutMeta): Promise async putTransform (this: That, params: T.TransformPutTransformRequest, options?: TransportRequestOptionsWithMeta): Promise> async putTransform (this: That, params: T.TransformPutTransformRequest, options?: TransportRequestOptions): Promise async putTransform (this: That, params: T.TransformPutTransformRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['transform_id'] - const acceptedBody: string[] = ['dest', 'description', 'frequency', 'latest', '_meta', 'pivot', 'retention_policy', 'settings', 'source', 'sync'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['transform.put_transform'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -302,8 +472,14 @@ export default class Transform { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -320,13 +496,16 @@ export default class Transform { /** * Reset a transform. Before you can reset it, you must stop it; alternatively, use the `force` query parameter. If the destination index was created by the transform, it is deleted. 
- * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-transform-reset-transform | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-transform-reset-transform | Elasticsearch API documentation} */ async resetTransform (this: That, params: T.TransformResetTransformRequest, options?: TransportRequestOptionsWithOutMeta): Promise async resetTransform (this: That, params: T.TransformResetTransformRequest, options?: TransportRequestOptionsWithMeta): Promise> async resetTransform (this: That, params: T.TransformResetTransformRequest, options?: TransportRequestOptions): Promise async resetTransform (this: That, params: T.TransformResetTransformRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['transform_id'] + const { + path: acceptedPath + } = this.acceptedParams['transform.reset_transform'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -362,13 +541,16 @@ export default class Transform { /** * Schedule a transform to start now. Instantly run a transform to process data. If you run this API, the transform will process the new data instantly, without waiting for the configured frequency interval. After the API is called, the transform will be processed again at `now + frequency` unless the API is called again in the meantime. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-transform-schedule-now-transform | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-transform-schedule-now-transform | Elasticsearch API documentation} */ async scheduleNowTransform (this: That, params: T.TransformScheduleNowTransformRequest, options?: TransportRequestOptionsWithOutMeta): Promise async scheduleNowTransform (this: That, params: T.TransformScheduleNowTransformRequest, options?: TransportRequestOptionsWithMeta): Promise> async scheduleNowTransform (this: That, params: T.TransformScheduleNowTransformRequest, options?: TransportRequestOptions): Promise async scheduleNowTransform (this: That, params: T.TransformScheduleNowTransformRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['transform_id'] + const { + path: acceptedPath + } = this.acceptedParams['transform.schedule_now_transform'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -404,13 +586,16 @@ export default class Transform { /** * Start a transform. When you start a transform, it creates the destination index if it does not already exist. The `number_of_shards` is set to `1` and the `auto_expand_replicas` is set to `0-1`. If it is a pivot transform, it deduces the mapping definitions for the destination index from the source indices and the transform aggregations. If fields in the destination index are derived from scripts (as in the case of `scripted_metric` or `bucket_script` aggregations), the transform uses dynamic mappings unless an index template exists. If it is a latest transform, it does not deduce mapping definitions; it uses dynamic mappings. To use explicit mappings, create the destination index before you start the transform. Alternatively, you can create an index template, though it does not affect the deduced mappings in a pivot transform. When the transform starts, a series of validations occur to ensure its success. 
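// Usage sketch (illustrative; assumes the `client` instance from the earlier example).
// Run the transform immediately instead of waiting for the next scheduled interval.
await client.transform.scheduleNowTransform({ transform_id: 'orders-by-customer' })

// Reset a transform; it must already be stopped (or pass the `force` query parameter).
await client.transform.resetTransform({ transform_id: 'orders-by-customer' })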
If you deferred validation when you created the transform, they occur when you start the transform—with the exception of privilege checks. When Elasticsearch security features are enabled, the transform remembers which roles the user that created it had at the time of creation and uses those same roles. If those roles do not have the required privileges on the source and destination indices, the transform fails when it attempts unauthorized operations. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-transform-start-transform | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-transform-start-transform | Elasticsearch API documentation} */ async startTransform (this: That, params: T.TransformStartTransformRequest, options?: TransportRequestOptionsWithOutMeta): Promise async startTransform (this: That, params: T.TransformStartTransformRequest, options?: TransportRequestOptionsWithMeta): Promise> async startTransform (this: That, params: T.TransformStartTransformRequest, options?: TransportRequestOptions): Promise async startTransform (this: That, params: T.TransformStartTransformRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['transform_id'] + const { + path: acceptedPath + } = this.acceptedParams['transform.start_transform'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -446,13 +631,16 @@ export default class Transform { /** * Stop transforms. Stops one or more transforms. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-transform-stop-transform | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-transform-stop-transform | Elasticsearch API documentation} */ async stopTransform (this: That, params: T.TransformStopTransformRequest, options?: TransportRequestOptionsWithOutMeta): Promise async stopTransform (this: That, params: T.TransformStopTransformRequest, options?: TransportRequestOptionsWithMeta): Promise> async stopTransform (this: That, params: T.TransformStopTransformRequest, options?: TransportRequestOptions): Promise async stopTransform (this: That, params: T.TransformStopTransformRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['transform_id'] + const { + path: acceptedPath + } = this.acceptedParams['transform.stop_transform'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -488,14 +676,18 @@ export default class Transform { /** * Update a transform. Updates certain properties of a transform. All updated properties except `description` do not take effect until after the transform starts the next checkpoint, thus there is data consistency in each checkpoint. To use this API, you must have `read` and `view_index_metadata` privileges for the source indices. You must also have `index` and `read` privileges for the destination index. When Elasticsearch security features are enabled, the transform remembers which roles the user who updated it had at the time of update and runs with those privileges. 
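// Usage sketch (illustrative; assumes the `client` instance from the earlier example).
await client.transform.startTransform({ transform_id: 'orders-by-customer' })

// Later, stop it and wait until it has fully stopped before returning.
await client.transform.stopTransform({
  transform_id: 'orders-by-customer',
  wait_for_completion: true    // query parameter
})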
- * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-transform-update-transform | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-transform-update-transform | Elasticsearch API documentation} */ async updateTransform (this: That, params: T.TransformUpdateTransformRequest, options?: TransportRequestOptionsWithOutMeta): Promise async updateTransform (this: That, params: T.TransformUpdateTransformRequest, options?: TransportRequestOptionsWithMeta): Promise> async updateTransform (this: That, params: T.TransformUpdateTransformRequest, options?: TransportRequestOptions): Promise async updateTransform (this: That, params: T.TransformUpdateTransformRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['transform_id'] - const acceptedBody: string[] = ['dest', 'description', 'frequency', '_meta', 'source', 'settings', 'sync', 'retention_policy'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['transform.update_transform'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -517,8 +709,14 @@ export default class Transform { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -535,13 +733,16 @@ export default class Transform { /** * Upgrade all transforms. Transforms are compatible across minor versions and between supported major versions. However, over time, the format of transform configuration information may change. This API identifies transforms that have a legacy configuration format and upgrades them to the latest version. It also cleans up the internal data structures that store the transform state and checkpoints. The upgrade does not affect the source and destination indices. The upgrade also does not affect the roles that transforms use when Elasticsearch security features are enabled; the role used to read source data and write to the destination index remains unchanged. If a transform upgrade step fails, the upgrade stops and an error is returned about the underlying issue. Resolve the issue then re-run the process again. A summary is returned when the upgrade is finished. To ensure continuous transforms remain running during a major version upgrade of the cluster – for example, from 7.16 to 8.0 – it is recommended to upgrade transforms before upgrading the cluster. You may want to perform a recent cluster backup prior to the upgrade. 
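// Usage sketch (illustrative; assumes the `client` instance from the earlier example).
// All of these properties appear in the body list of `transform.update_transform`, so the
// refactored request builder sends them in the request body.
await client.transform.updateTransform({
  transform_id: 'orders-by-customer',
  description: 'Total spend per customer, refreshed every minute',
  frequency: '1m',
  settings: { max_page_search_size: 1000 }
})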
- * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-transform-upgrade-transforms | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-transform-upgrade-transforms | Elasticsearch API documentation} */ async upgradeTransforms (this: That, params?: T.TransformUpgradeTransformsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async upgradeTransforms (this: That, params?: T.TransformUpgradeTransformsRequest, options?: TransportRequestOptionsWithMeta): Promise> async upgradeTransforms (this: That, params?: T.TransformUpgradeTransformsRequest, options?: TransportRequestOptions): Promise async upgradeTransforms (this: That, params?: T.TransformUpgradeTransformsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] + const { + path: acceptedPath + } = this.acceptedParams['transform.upgrade_transforms'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} diff --git a/src/api/api/update.ts b/src/api/api/update.ts index 06d06ae63..ac8afe575 100644 --- a/src/api/api/update.ts +++ b/src/api/api/update.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 */ /* eslint-disable import/export */ @@ -35,18 +21,60 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -interface That { transport: Transport } + +interface That { + transport: Transport +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] + +const acceptedParams: Record = { + update: { + path: [ + 'id', + 'index' + ], + body: [ + 'detect_noop', + 'doc', + 'doc_as_upsert', + 'script', + 'scripted_upsert', + '_source', + 'upsert' + ], + query: [ + 'if_primary_term', + 'if_seq_no', + 'include_source_on_error', + 'lang', + 'refresh', + 'require_alias', + 'retry_on_conflict', + 'routing', + 'timeout', + 'wait_for_active_shards', + '_source', + '_source_excludes', + '_source_includes' + ] + } +} /** * Update a document. Update a document by running a script or passing a partial document. If the Elasticsearch security features are enabled, you must have the `index` or `write` index privilege for the target index or index alias. The script can update, delete, or skip modifying the document. The API also supports passing a partial document, which is merged into the existing document. To fully replace an existing document, use the index API. This operation: * Gets the document (collocated with the shard) from the index. * Runs the specified script. * Indexes the result. 
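// Usage sketch for the update API (illustrative; the index, id, and field names are
// hypothetical). Under the new `acceptedParams.update` lists, `doc` and `doc_as_upsert`
// are body parameters, while `retry_on_conflict` and `refresh` are routed to the querystring.
await client.update({
  index: 'my-index',
  id: '1',
  doc: { status: 'active' },
  doc_as_upsert: true,
  retry_on_conflict: 3,
  refresh: 'wait_for'
})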
The document must still be reindexed, but using this API removes some network roundtrips and reduces chances of version conflicts between the GET and the index operation. The `_source` field must be enabled to use this API. In addition to `_source`, you can access the following variables through the `ctx` map: `_index`, `_type`, `_id`, `_version`, `_routing`, and `_now` (the current timestamp). - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-update | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-update | Elasticsearch API documentation} */ export default async function UpdateApi (this: That, params: T.UpdateRequest, options?: TransportRequestOptionsWithOutMeta): Promise> export default async function UpdateApi (this: That, params: T.UpdateRequest, options?: TransportRequestOptionsWithMeta): Promise, unknown>> export default async function UpdateApi (this: That, params: T.UpdateRequest, options?: TransportRequestOptions): Promise> export default async function UpdateApi (this: That, params: T.UpdateRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id', 'index'] - const acceptedBody: string[] = ['detect_noop', 'doc', 'doc_as_upsert', 'script', 'scripted_upsert', '_source', 'upsert'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = acceptedParams.update + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -68,8 +96,14 @@ export default async function UpdateApi = { + update_by_query: { + path: [ + 'index' + ], + body: [ + 'max_docs', + 'query', + 'script', + 'slice', + 'conflicts' + ], + query: [ + 'allow_no_indices', + 'analyzer', + 'analyze_wildcard', + 'conflicts', + 'default_operator', + 'df', + 'expand_wildcards', + 'from', + 'ignore_unavailable', + 'lenient', + 'max_docs', + 'pipeline', + 'preference', + 'q', + 'refresh', + 'request_cache', + 'requests_per_second', + 'routing', + 'scroll', + 'scroll_size', + 'search_timeout', + 'search_type', + 'slices', + 'sort', + 'stats', + 'terminate_after', + 'timeout', + 'version', + 'version_type', + 'wait_for_active_shards', + 'wait_for_completion' + ] + } +} /** * Update documents. Updates documents that match the specified query. If no query is specified, performs an update on every document in the data stream or index without modifying the source, which is useful for picking up mapping changes. If the Elasticsearch security features are enabled, you must have the following index privileges for the target data stream, index, or alias: * `read` * `index` or `write` You can specify the query criteria in the request URI or the request body using the same syntax as the search API. When you submit an update by query request, Elasticsearch gets a snapshot of the data stream or index when it begins processing the request and updates matching documents using internal versioning. When the versions match, the document is updated and the version number is incremented. If a document changes between the time that the snapshot is taken and the update operation is processed, it results in a version conflict and the operation fails. You can opt to count version conflicts instead of halting and returning by setting `conflicts` to `proceed`. 
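// Usage sketch (illustrative; assumes the `client` instance from the earlier example).
// `query` and `script` are body parameters for `update_by_query`; `conflicts: 'proceed'`
// counts version conflicts instead of aborting the operation.
await client.updateByQuery({
  index: 'my-index',
  conflicts: 'proceed',
  query: { term: { status: 'stale' } },
  script: {
    lang: 'painless',
    // Set ctx.op to 'noop' to skip a document instead of updating it.
    source: "if (ctx._source.status == 'active') { ctx.op = 'noop' } else { ctx._source.status = 'active' }"
  }
})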
Note that if you opt to count version conflicts, the operation could attempt to update more documents from the source than `max_docs` until it has successfully updated `max_docs` documents or it has gone through every document in the source query. NOTE: Documents with a version equal to 0 cannot be updated using update by query because internal versioning does not support 0 as a valid version number. While processing an update by query request, Elasticsearch performs multiple search requests sequentially to find all of the matching documents. A bulk update request is performed for each batch of matching documents. Any query or update failures cause the update by query request to fail and the failures are shown in the response. Any update requests that completed successfully still stick, they are not rolled back. **Throttling update requests** To control the rate at which update by query issues batches of update operations, you can set `requests_per_second` to any positive decimal number. This pads each batch with a wait time to throttle the rate. Set `requests_per_second` to `-1` to turn off throttling. Throttling uses a wait time between batches so that the internal scroll requests can be given a timeout that takes the request padding into account. The padding time is the difference between the batch size divided by the `requests_per_second` and the time spent writing. By default the batch size is 1000, so if `requests_per_second` is set to `500`: ``` target_time = 1000 / 500 per second = 2 seconds wait_time = target_time - write_time = 2 seconds - .5 seconds = 1.5 seconds ``` Since the batch is issued as a single _bulk request, large batch sizes cause Elasticsearch to create many requests and wait before starting the next set. This is "bursty" instead of "smooth". **Slicing** Update by query supports sliced scroll to parallelize the update process. This can improve efficiency and provide a convenient way to break the request down into smaller parts. Setting `slices` to `auto` chooses a reasonable number for most data streams and indices. This setting will use one slice per shard, up to a certain limit. If there are multiple source data streams or indices, it will choose the number of slices based on the index or backing index with the smallest number of shards. Adding `slices` to `_update_by_query` just automates the manual process of creating sub-requests, which means it has some quirks: * You can see these requests in the tasks APIs. These sub-requests are "child" tasks of the task for the request with slices. * Fetching the status of the task for the request with `slices` only contains the status of completed slices. * These sub-requests are individually addressable for things like cancellation and rethrottling. * Rethrottling the request with `slices` will rethrottle the unfinished sub-request proportionally. * Canceling the request with slices will cancel each sub-request. * Due to the nature of slices each sub-request won't get a perfectly even portion of the documents. All documents will be addressed, but some slices may be larger than others. Expect larger slices to have a more even distribution. * Parameters like `requests_per_second` and `max_docs` on a request with slices are distributed proportionally to each sub-request. Combine that with the point above about distribution being uneven and you should conclude that using `max_docs` with `slices` might not result in exactly `max_docs` documents being updated. 
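// Worked example of the throttling arithmetic described above (plain arithmetic, not
// client code): with the default batch size of 1000 and requests_per_second set to 500,
// each batch is padded so that batches are issued roughly every two seconds.
const batchSize = 1000
const requestsPerSecond = 500
const writeTimeSeconds = 0.5                              // example value from the text
const targetTime = batchSize / requestsPerSecond          // 2 seconds
const waitTime = targetTime - writeTimeSeconds            // 1.5 seconds of padding
console.log({ targetTime, waitTime })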
* Each sub-request gets a slightly different snapshot of the source data stream or index though these are all taken at approximately the same time. If you're slicing manually or otherwise tuning automatic slicing, keep in mind that: * Query performance is most efficient when the number of slices is equal to the number of shards in the index or backing index. If that number is large (for example, 500), choose a lower number as too many slices hurts performance. Setting slices higher than the number of shards generally does not improve efficiency and adds overhead. * Update performance scales linearly across available resources with the number of slices. Whether query or update performance dominates the runtime depends on the documents being reindexed and cluster resources. **Update the document source** Update by query supports scripts to update the document source. As with the update API, you can set `ctx.op` to change the operation that is performed. Set `ctx.op = "noop"` if your script decides that it doesn't have to make any changes. The update by query operation skips updating the document and increments the `noop` counter. Set `ctx.op = "delete"` if your script decides that the document should be deleted. The update by query operation deletes the document and increments the `deleted` counter. Update by query supports only `index`, `noop`, and `delete`. Setting `ctx.op` to anything else is an error. Setting any other field in `ctx` is an error. This API enables you to only modify the source of matching documents; you cannot move them. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-update-by-query | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-update-by-query | Elasticsearch API documentation} */ export default async function UpdateByQueryApi (this: That, params: T.UpdateByQueryRequest, options?: TransportRequestOptionsWithOutMeta): Promise export default async function UpdateByQueryApi (this: That, params: T.UpdateByQueryRequest, options?: TransportRequestOptionsWithMeta): Promise> export default async function UpdateByQueryApi (this: That, params: T.UpdateByQueryRequest, options?: TransportRequestOptions): Promise export default async function UpdateByQueryApi (this: That, params: T.UpdateByQueryRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['index'] - const acceptedBody: string[] = ['max_docs', 'query', 'script', 'slice', 'conflicts'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = acceptedParams.update_by_query + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -68,8 +111,14 @@ export default async function UpdateByQueryApi (this: That, params: T.UpdateByQu } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } diff --git a/src/api/api/update_by_query_rethrottle.ts b/src/api/api/update_by_query_rethrottle.ts index eb96ad0ed..b9b72ecca 100644 --- a/src/api/api/update_by_query_rethrottle.ts +++ b/src/api/api/update_by_query_rethrottle.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. 
under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 */ /* eslint-disable import/export */ @@ -35,17 +21,35 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -interface That { transport: Transport } + +interface That { + transport: Transport +} + +const acceptedParams: Record = { + update_by_query_rethrottle: { + path: [ + 'task_id' + ], + body: [], + query: [ + 'requests_per_second' + ] + } +} /** * Throttle an update by query operation. Change the number of requests per second for a particular update by query operation. Rethrottling that speeds up the query takes effect immediately but rethrotting that slows down the query takes effect after completing the current batch to prevent scroll timeouts. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-update-by-query-rethrottle | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-update-by-query-rethrottle | Elasticsearch API documentation} */ export default async function UpdateByQueryRethrottleApi (this: That, params: T.UpdateByQueryRethrottleRequest, options?: TransportRequestOptionsWithOutMeta): Promise export default async function UpdateByQueryRethrottleApi (this: That, params: T.UpdateByQueryRethrottleRequest, options?: TransportRequestOptionsWithMeta): Promise> export default async function UpdateByQueryRethrottleApi (this: That, params: T.UpdateByQueryRethrottleRequest, options?: TransportRequestOptions): Promise export default async function UpdateByQueryRethrottleApi (this: That, params: T.UpdateByQueryRethrottleRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['task_id'] + const { + path: acceptedPath + } = acceptedParams.update_by_query_rethrottle + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} diff --git a/src/api/api/watcher.ts b/src/api/api/watcher.ts index 7e795d62b..a4278f78f 100644 --- a/src/api/api/watcher.ts +++ b/src/api/api/watcher.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 */ /* eslint-disable import/export */ @@ -35,23 +21,162 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -interface That { transport: Transport } + +interface That { + transport: Transport + acceptedParams: Record +} + +const commonQueryParams = ['error_trace', 'filter_path', 'human', 'pretty'] export default class Watcher { transport: Transport + acceptedParams: Record constructor (transport: Transport) { this.transport = transport + this.acceptedParams = { + 'watcher.ack_watch': { + path: [ + 'watch_id', + 'action_id' + ], + body: [], + query: [] + }, + 'watcher.activate_watch': { + path: [ + 'watch_id' + ], + body: [], + query: [] + }, + 'watcher.deactivate_watch': { + path: [ + 'watch_id' + ], + body: [], + query: [] + }, + 'watcher.delete_watch': { + path: [ + 'id' + ], + body: [], + query: [] + }, + 'watcher.execute_watch': { + path: [ + 'id' + ], + body: [ + 'action_modes', + 'alternative_input', + 'ignore_condition', + 'record_execution', + 'simulated_actions', + 'trigger_data', + 'watch' + ], + query: [ + 'debug' + ] + }, + 'watcher.get_settings': { + path: [], + body: [], + query: [ + 'master_timeout' + ] + }, + 'watcher.get_watch': { + path: [ + 'id' + ], + body: [], + query: [] + }, + 'watcher.put_watch': { + path: [ + 'id' + ], + body: [ + 'actions', + 'condition', + 'input', + 'metadata', + 'throttle_period', + 'throttle_period_in_millis', + 'transform', + 'trigger' + ], + query: [ + 'active', + 'if_primary_term', + 'if_seq_no', + 'version' + ] + }, + 'watcher.query_watches': { + path: [], + body: [ + 'from', + 'size', + 'query', + 'sort', + 'search_after' + ], + query: [] + }, + 'watcher.start': { + path: [], + body: [], + query: [ + 'master_timeout' + ] + }, + 'watcher.stats': { + path: [ + 'metric' + ], + body: [], + query: [ + 'emit_stacktraces', + 'metric' + ] + }, + 'watcher.stop': { + path: [], + body: [], + query: [ + 'master_timeout' + ] + }, + 'watcher.update_settings': { + path: [], + body: [ + 'index.auto_expand_replicas', + 'index.number_of_replicas' + ], + query: [ + 'master_timeout', + 'timeout' + ] + } + } } /** * Acknowledge a watch. Acknowledging a watch enables you to manually throttle the execution of the watch's actions. The acknowledgement state of an action is stored in the `status.actions..ack.state` structure. IMPORTANT: If the specified watch is currently being executed, this API will return an error The reason for this behavior is to prevent overwriting the watch status from a watch execution. Acknowledging an action throttles further executions of that action until its `ack.state` is reset to `awaits_successful_execution`. This happens when the condition of the watch is not met (the condition evaluates to false). 
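// Usage sketch (illustrative; assumes the `client` instance from the earlier example;
// the watch and action IDs are hypothetical).
await client.watcher.ackWatch({
  watch_id: 'error-alert',
  action_id: 'email_admin'   // optional: acknowledge a single action instead of all of them
})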
- * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-ack-watch | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-watcher-ack-watch | Elasticsearch API documentation} */ async ackWatch (this: That, params: T.WatcherAckWatchRequest, options?: TransportRequestOptionsWithOutMeta): Promise async ackWatch (this: That, params: T.WatcherAckWatchRequest, options?: TransportRequestOptionsWithMeta): Promise> async ackWatch (this: That, params: T.WatcherAckWatchRequest, options?: TransportRequestOptions): Promise async ackWatch (this: That, params: T.WatcherAckWatchRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['watch_id', 'action_id'] + const { + path: acceptedPath + } = this.acceptedParams['watcher.ack_watch'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -95,13 +220,16 @@ export default class Watcher { /** * Activate a watch. A watch can be either active or inactive. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-activate-watch | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-watcher-activate-watch | Elasticsearch API documentation} */ async activateWatch (this: That, params: T.WatcherActivateWatchRequest, options?: TransportRequestOptionsWithOutMeta): Promise async activateWatch (this: That, params: T.WatcherActivateWatchRequest, options?: TransportRequestOptionsWithMeta): Promise> async activateWatch (this: That, params: T.WatcherActivateWatchRequest, options?: TransportRequestOptions): Promise async activateWatch (this: That, params: T.WatcherActivateWatchRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['watch_id'] + const { + path: acceptedPath + } = this.acceptedParams['watcher.activate_watch'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -137,13 +265,16 @@ export default class Watcher { /** * Deactivate a watch. A watch can be either active or inactive. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-deactivate-watch | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-watcher-deactivate-watch | Elasticsearch API documentation} */ async deactivateWatch (this: That, params: T.WatcherDeactivateWatchRequest, options?: TransportRequestOptionsWithOutMeta): Promise async deactivateWatch (this: That, params: T.WatcherDeactivateWatchRequest, options?: TransportRequestOptionsWithMeta): Promise> async deactivateWatch (this: That, params: T.WatcherDeactivateWatchRequest, options?: TransportRequestOptions): Promise async deactivateWatch (this: That, params: T.WatcherDeactivateWatchRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['watch_id'] + const { + path: acceptedPath + } = this.acceptedParams['watcher.deactivate_watch'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -179,13 +310,16 @@ export default class Watcher { /** * Delete a watch. When the watch is removed, the document representing the watch in the `.watches` index is gone and it will never be run again. 
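// Usage sketch (illustrative; assumes the `client` instance from the earlier example).
// Temporarily silence a watch without deleting it, then re-enable it.
await client.watcher.deactivateWatch({ watch_id: 'error-alert' })
await client.watcher.activateWatch({ watch_id: 'error-alert' })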
Deleting a watch does not delete any watch execution records related to this watch from the watch history. IMPORTANT: Deleting a watch must be done by using only this API. Do not delete the watch directly from the `.watches` index using the Elasticsearch delete document API When Elasticsearch security features are enabled, make sure no write privileges are granted to anyone for the `.watches` index. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-delete-watch | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-watcher-delete-watch | Elasticsearch API documentation} */ async deleteWatch (this: That, params: T.WatcherDeleteWatchRequest, options?: TransportRequestOptionsWithOutMeta): Promise async deleteWatch (this: That, params: T.WatcherDeleteWatchRequest, options?: TransportRequestOptionsWithMeta): Promise> async deleteWatch (this: That, params: T.WatcherDeleteWatchRequest, options?: TransportRequestOptions): Promise async deleteWatch (this: That, params: T.WatcherDeleteWatchRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id'] + const { + path: acceptedPath + } = this.acceptedParams['watcher.delete_watch'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -221,14 +355,18 @@ export default class Watcher { /** * Run a watch. This API can be used to force execution of the watch outside of its triggering logic or to simulate the watch execution for debugging purposes. For testing and debugging purposes, you also have fine-grained control on how the watch runs. You can run the watch without running all of its actions or alternatively by simulating them. You can also force execution by ignoring the watch condition and control whether a watch record would be written to the watch history after it runs. You can use the run watch API to run watches that are not yet registered by specifying the watch definition inline. This serves as great tool for testing and debugging your watches prior to adding them to Watcher. When Elasticsearch security features are enabled on your cluster, watches are run with the privileges of the user that stored the watches. If your user is allowed to read index `a`, but not index `b`, then the exact same set of rules will apply during execution of a watch. When using the run watch API, the authorization data of the user that called the API will be used as a base, instead of the information who stored the watch. 
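// Usage sketch (illustrative; assumes the `client` instance from the earlier example).
// `ignore_condition` and `record_execution` are body parameters of `watcher.execute_watch`;
// `debug` is its only query parameter.
await client.watcher.executeWatch({
  id: 'error-alert',
  ignore_condition: true,     // force the actions to run even if the condition is false
  record_execution: false     // do not write a watch record to the watch history
})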
- * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-execute-watch | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-watcher-execute-watch | Elasticsearch API documentation} */ async executeWatch (this: That, params?: T.WatcherExecuteWatchRequest, options?: TransportRequestOptionsWithOutMeta): Promise async executeWatch (this: That, params?: T.WatcherExecuteWatchRequest, options?: TransportRequestOptionsWithMeta): Promise> async executeWatch (this: That, params?: T.WatcherExecuteWatchRequest, options?: TransportRequestOptions): Promise async executeWatch (this: That, params?: T.WatcherExecuteWatchRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id'] - const acceptedBody: string[] = ['action_modes', 'alternative_input', 'ignore_condition', 'record_execution', 'simulated_actions', 'trigger_data', 'watch'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['watcher.execute_watch'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -251,8 +389,14 @@ export default class Watcher { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -276,13 +420,16 @@ export default class Watcher { /** * Get Watcher index settings. Get settings for the Watcher internal index (`.watches`). Only a subset of settings are shown, for example `index.auto_expand_replicas` and `index.number_of_replicas`. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-get-settings | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-watcher-get-settings | Elasticsearch API documentation} */ async getSettings (this: That, params?: T.WatcherGetSettingsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getSettings (this: That, params?: T.WatcherGetSettingsRequest, options?: TransportRequestOptionsWithMeta): Promise> async getSettings (this: That, params?: T.WatcherGetSettingsRequest, options?: TransportRequestOptions): Promise async getSettings (this: That, params?: T.WatcherGetSettingsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] + const { + path: acceptedPath + } = this.acceptedParams['watcher.get_settings'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -316,13 +463,16 @@ export default class Watcher { /** * Get a watch. 
- * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-get-watch | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-watcher-get-watch | Elasticsearch API documentation} */ async getWatch (this: That, params: T.WatcherGetWatchRequest, options?: TransportRequestOptionsWithOutMeta): Promise async getWatch (this: That, params: T.WatcherGetWatchRequest, options?: TransportRequestOptionsWithMeta): Promise> async getWatch (this: That, params: T.WatcherGetWatchRequest, options?: TransportRequestOptions): Promise async getWatch (this: That, params: T.WatcherGetWatchRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id'] + const { + path: acceptedPath + } = this.acceptedParams['watcher.get_watch'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -358,14 +508,18 @@ export default class Watcher { /** * Create or update a watch. When a watch is registered, a new document that represents the watch is added to the `.watches` index and its trigger is immediately registered with the relevant trigger engine. Typically for the `schedule` trigger, the scheduler is the trigger engine. IMPORTANT: You must use Kibana or this API to create a watch. Do not add a watch directly to the `.watches` index by using the Elasticsearch index API. If Elasticsearch security features are enabled, do not give users write privileges on the `.watches` index. When you add a watch you can also define its initial active state by setting the *active* parameter. When Elasticsearch security features are enabled, your watch can index or search only on indices for which the user that stored the watch has privileges. If the user is able to read index `a`, but not index `b`, the same will apply when the watch runs. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-put-watch | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-watcher-put-watch | Elasticsearch API documentation} */ async putWatch (this: That, params: T.WatcherPutWatchRequest, options?: TransportRequestOptionsWithOutMeta): Promise async putWatch (this: That, params: T.WatcherPutWatchRequest, options?: TransportRequestOptionsWithMeta): Promise> async putWatch (this: That, params: T.WatcherPutWatchRequest, options?: TransportRequestOptions): Promise async putWatch (this: That, params: T.WatcherPutWatchRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['id'] - const acceptedBody: string[] = ['actions', 'condition', 'input', 'metadata', 'throttle_period', 'throttle_period_in_millis', 'transform', 'trigger'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['watcher.put_watch'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -387,8 +541,14 @@ export default class Watcher { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? 
{} + // @ts-expect-error + body[key] = params[key] + } } } @@ -405,14 +565,18 @@ export default class Watcher { /** * Query watches. Get all registered watches in a paginated manner and optionally filter watches by a query. Note that only the `_id` and `metadata.*` fields are queryable or sortable. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-query-watches | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-watcher-query-watches | Elasticsearch API documentation} */ async queryWatches (this: That, params?: T.WatcherQueryWatchesRequest, options?: TransportRequestOptionsWithOutMeta): Promise async queryWatches (this: That, params?: T.WatcherQueryWatchesRequest, options?: TransportRequestOptionsWithMeta): Promise> async queryWatches (this: That, params?: T.WatcherQueryWatchesRequest, options?: TransportRequestOptions): Promise async queryWatches (this: That, params?: T.WatcherQueryWatchesRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const acceptedBody: string[] = ['from', 'size', 'query', 'sort', 'search_after'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['watcher.query_watches'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -435,8 +599,14 @@ export default class Watcher { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } @@ -450,13 +620,16 @@ export default class Watcher { /** * Start the watch service. Start the Watcher service if it is not already running. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-start | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-watcher-start | Elasticsearch API documentation} */ async start (this: That, params?: T.WatcherStartRequest, options?: TransportRequestOptionsWithOutMeta): Promise async start (this: That, params?: T.WatcherStartRequest, options?: TransportRequestOptionsWithMeta): Promise> async start (this: That, params?: T.WatcherStartRequest, options?: TransportRequestOptions): Promise async start (this: That, params?: T.WatcherStartRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] + const { + path: acceptedPath + } = this.acceptedParams['watcher.start'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -490,13 +663,16 @@ export default class Watcher { /** * Get Watcher statistics. This API always returns basic metrics. You retrieve more metrics by using the metric parameter. 
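// Usage sketch (illustrative; the watch definition below is a hypothetical example).
// `trigger`, `input`, `condition`, and `actions` are body parameters of
// `watcher.put_watch`; `active` is a query parameter.
await client.watcher.putWatch({
  id: 'error-alert',
  active: true,
  trigger: { schedule: { interval: '10m' } },
  input: {
    search: {
      request: {
        indices: ['logs-*'],
        body: { query: { match: { level: 'error' } } }
      }
    }
  },
  condition: { compare: { 'ctx.payload.hits.total': { gt: 0 } } },
  actions: {
    log_error: { logging: { text: 'Errors found in the last 10 minutes' } }
  }
})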
- * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-stats | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-watcher-stats | Elasticsearch API documentation} */ async stats (this: That, params?: T.WatcherStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async stats (this: That, params?: T.WatcherStatsRequest, options?: TransportRequestOptionsWithMeta): Promise> async stats (this: That, params?: T.WatcherStatsRequest, options?: TransportRequestOptions): Promise async stats (this: That, params?: T.WatcherStatsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = ['metric'] + const { + path: acceptedPath + } = this.acceptedParams['watcher.stats'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -540,13 +716,16 @@ export default class Watcher { /** * Stop the watch service. Stop the Watcher service if it is running. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-stop | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-watcher-stop | Elasticsearch API documentation} */ async stop (this: That, params?: T.WatcherStopRequest, options?: TransportRequestOptionsWithOutMeta): Promise async stop (this: That, params?: T.WatcherStopRequest, options?: TransportRequestOptionsWithMeta): Promise> async stop (this: That, params?: T.WatcherStopRequest, options?: TransportRequestOptions): Promise async stop (this: That, params?: T.WatcherStopRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] + const { + path: acceptedPath + } = this.acceptedParams['watcher.stop'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -579,15 +758,19 @@ export default class Watcher { } /** - * Update Watcher index settings. Update settings for the Watcher internal index (`.watches`). Only a subset of settings can be modified. This includes `index.auto_expand_replicas` and `index.number_of_replicas`. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-watcher-update-settings | Elasticsearch API documentation} + * Update Watcher index settings. Update settings for the Watcher internal index (`.watches`). Only a subset of settings can be modified. This includes `index.auto_expand_replicas`, `index.number_of_replicas`, `index.routing.allocation.exclude.*`, `index.routing.allocation.include.*` and `index.routing.allocation.require.*`. Modification of `index.routing.allocation.include._tier_preference` is an exception and is not allowed as the Watcher shards must always be in the `data_content` tier. 
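// Usage sketch (illustrative; assumes the `client` instance from the earlier example).
// The flattened setting names below are the body parameters listed for
// `watcher.update_settings`.
await client.watcher.updateSettings({
  'index.auto_expand_replicas': '0-4'   // alternatively: 'index.number_of_replicas': 1
})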
+ * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-watcher-update-settings | Elasticsearch API documentation} */ async updateSettings (this: That, params?: T.WatcherUpdateSettingsRequest, options?: TransportRequestOptionsWithOutMeta): Promise async updateSettings (this: That, params?: T.WatcherUpdateSettingsRequest, options?: TransportRequestOptionsWithMeta): Promise> async updateSettings (this: That, params?: T.WatcherUpdateSettingsRequest, options?: TransportRequestOptions): Promise async updateSettings (this: That, params?: T.WatcherUpdateSettingsRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] - const acceptedBody: string[] = ['index.auto_expand_replicas', 'index.number_of_replicas'] + const { + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery + } = this.acceptedParams['watcher.update_settings'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -610,8 +793,14 @@ export default class Watcher { } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - // @ts-expect-error - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } } } diff --git a/src/api/api/xpack.ts b/src/api/api/xpack.ts index 9e6a66f7b..cad3e3743 100644 --- a/src/api/api/xpack.ts +++ b/src/api/api/xpack.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 */ /* eslint-disable import/export */ @@ -35,23 +21,49 @@ import { TransportResult } from '@elastic/transport' import * as T from '../types' -interface That { transport: Transport } + +interface That { + transport: Transport + acceptedParams: Record +} export default class Xpack { transport: Transport + acceptedParams: Record constructor (transport: Transport) { this.transport = transport + this.acceptedParams = { + 'xpack.info': { + path: [], + body: [], + query: [ + 'categories', + 'accept_enterprise', + 'human' + ] + }, + 'xpack.usage': { + path: [], + body: [], + query: [ + 'master_timeout' + ] + } + } } /** * Get information. The information provided by the API includes: * Build information including the build number and timestamp. * License information about the currently installed license. * Feature information for the features that are currently enabled and available under the current license. 
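// Usage sketch (illustrative; assumes the `client` instance from the earlier example).
// `categories` and `accept_enterprise` are query parameters of `xpack.info`.
const info = await client.xpack.info({ categories: ['build', 'license', 'features'] })
console.log(info.build, info.license, info.features)

const usage = await client.xpack.usage()
console.log(Object.keys(usage))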
- * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-info | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/operation/operation-info | Elasticsearch API documentation} */ async info (this: That, params?: T.XpackInfoRequest, options?: TransportRequestOptionsWithOutMeta): Promise async info (this: That, params?: T.XpackInfoRequest, options?: TransportRequestOptionsWithMeta): Promise> async info (this: That, params?: T.XpackInfoRequest, options?: TransportRequestOptions): Promise async info (this: That, params?: T.XpackInfoRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] + const { + path: acceptedPath + } = this.acceptedParams['xpack.info'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} @@ -85,13 +97,16 @@ export default class Xpack { /** * Get usage information. Get information about the features that are currently enabled and available under the current license. The API also provides some usage statistics. - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-xpack | Elasticsearch API documentation} + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/v9/group/endpoint-xpack | Elasticsearch API documentation} */ async usage (this: That, params?: T.XpackUsageRequest, options?: TransportRequestOptionsWithOutMeta): Promise async usage (this: That, params?: T.XpackUsageRequest, options?: TransportRequestOptionsWithMeta): Promise> async usage (this: That, params?: T.XpackUsageRequest, options?: TransportRequestOptions): Promise async usage (this: That, params?: T.XpackUsageRequest, options?: TransportRequestOptions): Promise { - const acceptedPath: string[] = [] + const { + path: acceptedPath + } = this.acceptedParams['xpack.usage'] + const userQuery = params?.querystring const querystring: Record = userQuery != null ? { ...userQuery } : {} diff --git a/src/api/index.ts b/src/api/index.ts index f69eb473d..cfa328a82 100644 --- a/src/api/index.ts +++ b/src/api/index.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 */ /* eslint-disable import/export */ diff --git a/src/api/types.ts b/src/api/types.ts index e242e803c..c0f34bf9b 100644 --- a/src/api/types.ts +++ b/src/api/types.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. 
licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 */ /* eslint-disable @typescript-eslint/array-type */ @@ -40,8 +26,11 @@ export interface BulkIndexOperation extends BulkWriteOperation { } export interface BulkOperationBase { + /** The document ID. */ _id?: Id + /** The name of the index or index alias to perform the action on. */ _index?: IndexName + /** A custom value used to route operations to a specific shard. */ routing?: Routing if_primary_term?: long if_seq_no?: SequenceNumber @@ -50,36 +39,58 @@ export interface BulkOperationBase { } export interface BulkOperationContainer { + /** Index the specified document. + * If the document exists, it replaces the document and increments the version. + * The following line must contain the source data to be indexed. */ index?: BulkIndexOperation + /** Index the specified document if it does not already exist. + * The following line must contain the source data to be indexed. */ create?: BulkCreateOperation + /** Perform a partial document update. + * The following line must contain the partial document and update options. */ update?: BulkUpdateOperation + /** Remove the specified document from the index. */ delete?: BulkDeleteOperation } export type BulkOperationType = 'index' | 'create' | 'update' | 'delete' export interface BulkRequest extends RequestBase { -/** The name of the data stream, index, or index alias to perform bulk actions on. */ + /** The name of the data stream, index, or index alias to perform bulk actions on. */ index?: IndexName /** True or false if to include the document source in the error message in case of parsing errors. */ include_source_on_error?: boolean /** If `true`, the response will include the ingest pipelines that were run for each index or create. */ list_executed_pipelines?: boolean - /** The pipeline identifier to use to preprocess incoming documents. If the index has a default ingest pipeline specified, setting the value to `_none` turns off the default ingest pipeline for this request. If a final pipeline is configured, it will always run regardless of the value of this parameter. */ + /** The pipeline identifier to use to preprocess incoming documents. + * If the index has a default ingest pipeline specified, setting the value to `_none` turns off the default ingest pipeline for this request. + * If a final pipeline is configured, it will always run regardless of the value of this parameter. */ pipeline?: string - /** If `true`, Elasticsearch refreshes the affected shards to make this operation visible to search. If `wait_for`, wait for a refresh to make this operation visible to search. If `false`, do nothing with refreshes. Valid values: `true`, `false`, `wait_for`. */ + /** If `true`, Elasticsearch refreshes the affected shards to make this operation visible to search. + * If `wait_for`, wait for a refresh to make this operation visible to search. 
+ * If `false`, do nothing with refreshes. + * Valid values: `true`, `false`, `wait_for`. */ refresh?: Refresh /** A custom value that is used to route operations to a specific shard. */ routing?: Routing /** Indicates whether to return the `_source` field (`true` or `false`) or contains a list of fields to return. */ _source?: SearchSourceConfigParam - /** A comma-separated list of source fields to exclude from the response. You can also use this parameter to exclude fields from the subset specified in `_source_includes` query parameter. If the `_source` parameter is `false`, this parameter is ignored. */ + /** A comma-separated list of source fields to exclude from the response. + * You can also use this parameter to exclude fields from the subset specified in `_source_includes` query parameter. + * If the `_source` parameter is `false`, this parameter is ignored. */ _source_excludes?: Fields - /** A comma-separated list of source fields to include in the response. If this parameter is specified, only these source fields are returned. You can exclude fields from this subset using the `_source_excludes` query parameter. If the `_source` parameter is `false`, this parameter is ignored. */ + /** A comma-separated list of source fields to include in the response. + * If this parameter is specified, only these source fields are returned. + * You can exclude fields from this subset using the `_source_excludes` query parameter. + * If the `_source` parameter is `false`, this parameter is ignored. */ _source_includes?: Fields - /** The period each action waits for the following operations: automatic index creation, dynamic mapping updates, and waiting for active shards. The default is `1m` (one minute), which guarantees Elasticsearch waits for at least the timeout before failing. The actual wait time could be longer, particularly when multiple waits occur. */ + /** The period each action waits for the following operations: automatic index creation, dynamic mapping updates, and waiting for active shards. + * The default is `1m` (one minute), which guarantees Elasticsearch waits for at least the timeout before failing. + * The actual wait time could be longer, particularly when multiple waits occur. */ timeout?: Duration - /** The number of shard copies that must be active before proceeding with the operation. Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). The default is `1`, which waits for each primary shard to be active. */ + /** The number of shard copies that must be active before proceeding with the operation. + * Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). + * The default is `1`, which waits for each primary shard to be active. */ wait_for_active_shards?: WaitForActiveShards /** If `true`, the request's actions must target an index alias. */ require_alias?: boolean @@ -93,50 +104,90 @@ export interface BulkRequest ex } export interface BulkResponse { + /** If `true`, one or more of the operations in the bulk request did not complete successfully. */ errors: boolean + /** The result of each operation in the bulk request, in the order they were submitted. */ items: Partial>[] + /** The length of time, in milliseconds, it took to process the bulk request. */ took: long ingest_took?: long } export interface BulkResponseItem { + /** The document ID associated with the operation. */ _id?: string | null + /** The name of the index associated with the operation. 
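As a quick orientation for the `BulkOperationContainer`, `BulkRequest`, and `BulkResponseItem` shapes above, here is a minimal, hedged sketch of a bulk call with the `@elastic/elasticsearch` client; the node URL, credentials, index name, and document fields are made up for illustration:

```ts
import { Client } from '@elastic/elasticsearch'

// Illustrative connection details only.
const client = new Client({ node: 'https://localhost:9200/', auth: { apiKey: 'REDACTED' } })

// Each action object is followed by its source line, mirroring BulkOperationContainer.
const response = await client.bulk({
  index: 'my-index',
  refresh: 'wait_for',
  operations: [
    { create: { _id: '1' } },
    { title: 'first document' },
    { update: { _id: '2' } },
    { doc: { title: 'patched title' }, doc_as_upsert: true }
  ]
})

if (response.errors) {
  // Only failed items carry an `error` property (see BulkResponseItem).
  const failed = response.items.filter(item => item.create?.error != null || item.update?.error != null)
  console.log(failed)
}
```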
+ * If the operation targeted a data stream, this is the backing index into which the document was written. */ _index: string + /** The HTTP status code returned for the operation. */ status: integer failure_store?: BulkFailureStoreStatus + /** Additional information about the failed operation. + * The property is returned only for failed operations. */ error?: ErrorCause + /** The primary term assigned to the document for the operation. + * This property is returned only for successful operations. */ _primary_term?: long + /** The result of the operation. + * Successful values are `created`, `deleted`, and `updated`. */ result?: string + /** The sequence number assigned to the document for the operation. + * Sequence numbers are used to ensure an older version of a document doesn't overwrite a newer version. */ _seq_no?: SequenceNumber + /** Shard information for the operation. */ _shards?: ShardStatistics + /** The document version associated with the operation. + * The document version is incremented each time the document is updated. + * This property is returned only for successful actions. */ _version?: VersionNumber forced_refresh?: boolean get?: InlineGet> } export interface BulkUpdateAction { + /** If true, the `result` in the response is set to 'noop' when no changes to the document occur. */ detect_noop?: boolean + /** A partial update to an existing document. */ doc?: TPartialDocument + /** Set to `true` to use the contents of `doc` as the value of `upsert`. */ doc_as_upsert?: boolean - script?: Script | string + /** The script to run to update the document. */ + script?: Script | ScriptSource + /** Set to `true` to run the script whether or not the document exists. */ scripted_upsert?: boolean + /** If `false`, source retrieval is turned off. + * You can also specify a comma-separated list of the fields you want to retrieve. */ _source?: SearchSourceConfig + /** If the document does not already exist, the contents of `upsert` are inserted as a new document. + * If the document exists, the `script` is run. */ upsert?: TDocument } export interface BulkUpdateOperation extends BulkOperationBase { + /** If `true`, the request's actions must target an index alias. */ require_alias?: boolean + /** The number of times an update should be retried in the case of a version conflict. */ retry_on_conflict?: integer } export interface BulkWriteOperation extends BulkOperationBase { + /** A map from the full name of fields to the name of dynamic templates. + * It defaults to an empty map. + * If a name matches a dynamic template, that template will be applied regardless of other match predicates defined in the template. + * If a field is already defined in the mapping, then this parameter won't be used. */ dynamic_templates?: Record + /** The ID of the pipeline to use to preprocess incoming documents. + * If the index has a default ingest pipeline specified, setting the value to `_none` turns off the default ingest pipeline for this request. + * If a final pipeline is configured, it will always run regardless of the value of this parameter. */ pipeline?: string + /** If `true`, the request's actions must target an index alias. */ require_alias?: boolean } export interface ClearScrollRequest extends RequestBase { -/** A comma-separated list of scroll IDs to clear. To clear all scroll IDs, use `_all`. IMPORTANT: Scroll IDs can be long. It is recommended to specify scroll IDs in the request body parameter. */ + /** A comma-separated list of scroll IDs to clear. 
+ * To clear all scroll IDs, use `_all`. + * IMPORTANT: Scroll IDs can be long. It is recommended to specify scroll IDs in the request body parameter. */ scroll_id?: ScrollIds /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { scroll_id?: never } @@ -145,12 +196,15 @@ export interface ClearScrollRequest extends RequestBase { } export interface ClearScrollResponse { + /** If `true`, the request succeeded. + * This does not indicate whether any scrolling search requests were cleared. */ succeeded: boolean + /** The number of scrolling search requests cleared. */ num_freed: integer } export interface ClosePointInTimeRequest extends RequestBase { -/** The ID of the point-in-time. */ + /** The ID of the point-in-time. */ id: Id /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { id?: never } @@ -159,42 +213,64 @@ export interface ClosePointInTimeRequest extends RequestBase { } export interface ClosePointInTimeResponse { + /** If `true`, all search contexts associated with the point-in-time ID were successfully closed. */ succeeded: boolean + /** The number of search contexts that were successfully closed. */ num_freed: integer } export interface CountRequest extends RequestBase { -/** A comma-separated list of data streams, indices, and aliases to search. It supports wildcards (`*`). To search all data streams and indices, omit this parameter or use `*` or `_all`. */ + /** A comma-separated list of data streams, indices, and aliases to search. + * It supports wildcards (`*`). + * To search all data streams and indices, omit this parameter or use `*` or `_all`. */ index?: Indices - /** If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. */ + /** If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. + * This behavior applies even if the request targets other open indices. + * For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. */ allow_no_indices?: boolean - /** The analyzer to use for the query string. This parameter can be used only when the `q` query string parameter is specified. */ + /** The analyzer to use for the query string. + * This parameter can be used only when the `q` query string parameter is specified. */ analyzer?: string - /** If `true`, wildcard and prefix queries are analyzed. This parameter can be used only when the `q` query string parameter is specified. */ + /** If `true`, wildcard and prefix queries are analyzed. + * This parameter can be used only when the `q` query string parameter is specified. */ analyze_wildcard?: boolean - /** The default operator for query string query: `AND` or `OR`. This parameter can be used only when the `q` query string parameter is specified. */ + /** The default operator for query string query: `AND` or `OR`. + * This parameter can be used only when the `q` query string parameter is specified. */ default_operator?: QueryDslOperator - /** The field to use as a default when no field prefix is given in the query string. This parameter can be used only when the `q` query string parameter is specified. 
*/ + /** The field to use as a default when no field prefix is given in the query string. + * This parameter can be used only when the `q` query string parameter is specified. */ df?: string - /** The type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. It supports comma-separated values, such as `open,hidden`. */ + /** The type of index that wildcard patterns can match. + * If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. + * It supports comma-separated values, such as `open,hidden`. */ expand_wildcards?: ExpandWildcards /** If `true`, concrete, expanded, or aliased indices are ignored when frozen. */ ignore_throttled?: boolean /** If `false`, the request returns an error if it targets a missing or closed index. */ ignore_unavailable?: boolean - /** If `true`, format-based query failures (such as providing text to a numeric field) in the query string will be ignored. This parameter can be used only when the `q` query string parameter is specified. */ + /** If `true`, format-based query failures (such as providing text to a numeric field) in the query string will be ignored. + * This parameter can be used only when the `q` query string parameter is specified. */ lenient?: boolean /** The minimum `_score` value that documents must have to be included in the result. */ min_score?: double - /** The node or shard the operation should be performed on. By default, it is random. */ + /** The node or shard the operation should be performed on. + * By default, it is random. */ preference?: string /** A custom value used to route operations to a specific shard. */ routing?: Routing - /** The maximum number of documents to collect for each shard. If a query reaches this limit, Elasticsearch terminates the query early. Elasticsearch collects documents before sorting. IMPORTANT: Use with caution. Elasticsearch applies this parameter to each shard handling the request. When possible, let Elasticsearch perform early termination automatically. Avoid specifying this parameter for requests that target data streams with backing indices across multiple data tiers. */ + /** The maximum number of documents to collect for each shard. + * If a query reaches this limit, Elasticsearch terminates the query early. + * Elasticsearch collects documents before sorting. + * + * IMPORTANT: Use with caution. + * Elasticsearch applies this parameter to each shard handling the request. + * When possible, let Elasticsearch perform early termination automatically. + * Avoid specifying this parameter for requests that target data streams with backing indices across multiple data tiers. */ terminate_after?: long /** The query in Lucene query string syntax. This parameter cannot be used with a request body. */ q?: string - /** Defines the search query using Query DSL. A request body query cannot be used with the `q` query string parameter. */ + /** Defines the search query using Query DSL. A request body query cannot be used + * with the `q` query string parameter. */ query?: QueryDslQueryContainer /** All values in `body` will be added to the request body. 
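To make the `CountRequest` options concrete, a small sketch of a count call; the index name and query are assumptions made for the example:

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'https://localhost:9200/' }) // illustrative node URL

// Either `q` (Lucene syntax) or `query` (Query DSL) can be used, but not both.
const { count } = await client.count({
  index: 'my-index',
  query: { term: { status: 'published' } }
})
console.log(`matching documents: ${count}`)
```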
*/ body?: string | { [key: string]: any } & { index?: never, allow_no_indices?: never, analyzer?: never, analyze_wildcard?: never, default_operator?: never, df?: never, expand_wildcards?: never, ignore_throttled?: never, ignore_unavailable?: never, lenient?: never, min_score?: never, preference?: never, routing?: never, terminate_after?: never, q?: never, query?: never } @@ -208,37 +284,69 @@ export interface CountResponse { } export interface CreateRequest extends RequestBase { -/** A unique identifier for the document. To automatically generate a document ID, use the `POST /<target>/_doc/` request format. */ + /** A unique identifier for the document. + * To automatically generate a document ID, use the `POST /<target>/_doc/` request format. */ id: Id - /** The name of the data stream or index to target. If the target doesn't exist and matches the name or wildcard (`*`) pattern of an index template with a `data_stream` definition, this request creates the data stream. If the target doesn't exist and doesn’t match a data stream template, this request creates the index. */ + /** The name of the data stream or index to target. + * If the target doesn't exist and matches the name or wildcard (`*`) pattern of an index template with a `data_stream` definition, this request creates the data stream. + * If the target doesn't exist and doesn’t match a data stream template, this request creates the index. */ index: IndexName + /** Only perform the operation if the document has this primary term. */ + if_primary_term?: long + /** Only perform the operation if the document has this sequence number. */ + if_seq_no?: SequenceNumber /** True or false if to include the document source in the error message in case of parsing errors. */ include_source_on_error?: boolean - /** The ID of the pipeline to use to preprocess incoming documents. If the index has a default ingest pipeline specified, setting the value to `_none` turns off the default ingest pipeline for this request. If a final pipeline is configured, it will always run regardless of the value of this parameter. */ + /** Set to `create` to only index the document if it does not already exist (put if absent). + * If a document with the specified `_id` already exists, the indexing operation will fail. + * The behavior is the same as using the `/_create` endpoint. + * If a document ID is specified, this parameter defaults to `index`. + * Otherwise, it defaults to `create`. + * If the request targets a data stream, an `op_type` of `create` is required. */ + op_type?: OpType + /** The ID of the pipeline to use to preprocess incoming documents. + * If the index has a default ingest pipeline specified, setting the value to `_none` turns off the default ingest pipeline for this request. + * If a final pipeline is configured, it will always run regardless of the value of this parameter. */ pipeline?: string - /** If `true`, Elasticsearch refreshes the affected shards to make this operation visible to search. If `wait_for`, it waits for a refresh to make this operation visible to search. If `false`, it does nothing with refreshes. */ + /** If `true`, Elasticsearch refreshes the affected shards to make this operation visible to search. + * If `wait_for`, it waits for a refresh to make this operation visible to search. + * If `false`, it does nothing with refreshes. */ refresh?: Refresh + /** If `true`, the destination must be an index alias. */ + require_alias?: boolean + /** If `true`, the request's actions must target a data stream (existing or to be created).
*/ + require_data_stream?: boolean /** A custom value that is used to route operations to a specific shard. */ routing?: Routing - /** The period the request waits for the following operations: automatic index creation, dynamic mapping updates, waiting for active shards. Elasticsearch waits for at least the specified timeout period before failing. The actual wait time could be longer, particularly when multiple waits occur. This parameter is useful for situations where the primary shard assigned to perform the operation might not be available when the operation runs. Some reasons for this might be that the primary shard is currently recovering from a gateway or undergoing relocation. By default, the operation will wait on the primary shard to become available for at least 1 minute before failing and responding with an error. The actual wait time could be longer, particularly when multiple waits occur. */ + /** The period the request waits for the following operations: automatic index creation, dynamic mapping updates, waiting for active shards. + * Elasticsearch waits for at least the specified timeout period before failing. + * The actual wait time could be longer, particularly when multiple waits occur. + * + * This parameter is useful for situations where the primary shard assigned to perform the operation might not be available when the operation runs. + * Some reasons for this might be that the primary shard is currently recovering from a gateway or undergoing relocation. + * By default, the operation will wait on the primary shard to become available for at least 1 minute before failing and responding with an error. + * The actual wait time could be longer, particularly when multiple waits occur. */ timeout?: Duration - /** The explicit version number for concurrency control. It must be a non-negative long number. */ + /** The explicit version number for concurrency control. + * It must be a non-negative long number. */ version?: VersionNumber /** The version type. */ version_type?: VersionType - /** The number of shard copies that must be active before proceeding with the operation. You can set it to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). The default value of `1` means it waits for each primary shard to be active. */ + /** The number of shard copies that must be active before proceeding with the operation. + * You can set it to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). + * The default value of `1` means it waits for each primary shard to be active. */ wait_for_active_shards?: WaitForActiveShards document?: TDocument /** All values in `body` will be added to the request body. */ - body?: string | { [key: string]: any } & { id?: never, index?: never, include_source_on_error?: never, pipeline?: never, refresh?: never, routing?: never, timeout?: never, version?: never, version_type?: never, wait_for_active_shards?: never, document?: never } + body?: string | { [key: string]: any } & { id?: never, index?: never, if_primary_term?: never, if_seq_no?: never, include_source_on_error?: never, op_type?: never, pipeline?: never, refresh?: never, require_alias?: never, require_data_stream?: never, routing?: never, timeout?: never, version?: never, version_type?: never, wait_for_active_shards?: never, document?: never } /** All values in `querystring` will be added to the request querystring. 
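A hedged sketch of how `CreateRequest` is typically used for a put-if-absent style insert; the index, document ID, and fields are invented for illustration:

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'https://localhost:9200/' }) // illustrative node URL

// Fails with a version conflict if a document with this ID already exists.
const result = await client.create({
  index: 'my-index',
  id: 'doc-1',
  refresh: 'wait_for',
  document: { title: 'hello', published_at: new Date().toISOString() }
})
console.log(result.result) // 'created' on success
```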
*/ - querystring?: { [key: string]: any } & { id?: never, index?: never, include_source_on_error?: never, pipeline?: never, refresh?: never, routing?: never, timeout?: never, version?: never, version_type?: never, wait_for_active_shards?: never, document?: never } + querystring?: { [key: string]: any } & { id?: never, index?: never, if_primary_term?: never, if_seq_no?: never, include_source_on_error?: never, op_type?: never, pipeline?: never, refresh?: never, require_alias?: never, require_data_stream?: never, routing?: never, timeout?: never, version?: never, version_type?: never, wait_for_active_shards?: never, document?: never } } export type CreateResponse = WriteResponseBase export interface DeleteRequest extends RequestBase { -/** A unique identifier for the document. */ + /** A unique identifier for the document. */ id: Id /** The name of the target index. */ index: IndexName @@ -246,17 +354,26 @@ export interface DeleteRequest extends RequestBase { if_primary_term?: long /** Only perform the operation if the document has this sequence number. */ if_seq_no?: SequenceNumber - /** If `true`, Elasticsearch refreshes the affected shards to make this operation visible to search. If `wait_for`, it waits for a refresh to make this operation visible to search. If `false`, it does nothing with refreshes. */ + /** If `true`, Elasticsearch refreshes the affected shards to make this operation visible to search. + * If `wait_for`, it waits for a refresh to make this operation visible to search. + * If `false`, it does nothing with refreshes. */ refresh?: Refresh /** A custom value used to route operations to a specific shard. */ routing?: Routing - /** The period to wait for active shards. This parameter is useful for situations where the primary shard assigned to perform the delete operation might not be available when the delete operation runs. Some reasons for this might be that the primary shard is currently recovering from a store or undergoing relocation. By default, the delete operation will wait on the primary shard to become available for up to 1 minute before failing and responding with an error. */ + /** The period to wait for active shards. + * + * This parameter is useful for situations where the primary shard assigned to perform the delete operation might not be available when the delete operation runs. + * Some reasons for this might be that the primary shard is currently recovering from a store or undergoing relocation. + * By default, the delete operation will wait on the primary shard to become available for up to 1 minute before failing and responding with an error. */ timeout?: Duration - /** An explicit version number for concurrency control. It must match the current version of the document for the request to succeed. */ + /** An explicit version number for concurrency control. + * It must match the current version of the document for the request to succeed. */ version?: VersionNumber /** The version type. */ version_type?: VersionType - /** The minimum number of shard copies that must be active before proceeding with the operation. You can set it to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). The default value of `1` means it waits for each primary shard to be active. */ + /** The minimum number of shard copies that must be active before proceeding with the operation. + * You can set it to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). 
+ * The default value of `1` means it waits for each primary shard to be active. */ wait_for_active_shards?: WaitForActiveShards /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { id?: never, index?: never, if_primary_term?: never, if_seq_no?: never, refresh?: never, routing?: never, timeout?: never, version?: never, version_type?: never, wait_for_active_shards?: never } @@ -267,33 +384,48 @@ export interface DeleteRequest extends RequestBase { export type DeleteResponse = WriteResponseBase export interface DeleteByQueryRequest extends RequestBase { -/** A comma-separated list of data streams, indices, and aliases to search. It supports wildcards (`*`). To search all data streams or indices, omit this parameter or use `*` or `_all`. */ + /** A comma-separated list of data streams, indices, and aliases to search. + * It supports wildcards (`*`). + * To search all data streams or indices, omit this parameter or use `*` or `_all`. */ index: Indices - /** If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. */ + /** If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. + * This behavior applies even if the request targets other open indices. + * For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. */ allow_no_indices?: boolean - /** Analyzer to use for the query string. This parameter can be used only when the `q` query string parameter is specified. */ + /** Analyzer to use for the query string. + * This parameter can be used only when the `q` query string parameter is specified. */ analyzer?: string - /** If `true`, wildcard and prefix queries are analyzed. This parameter can be used only when the `q` query string parameter is specified. */ + /** If `true`, wildcard and prefix queries are analyzed. + * This parameter can be used only when the `q` query string parameter is specified. */ analyze_wildcard?: boolean /** What to do if delete by query hits version conflicts: `abort` or `proceed`. */ conflicts?: Conflicts - /** The default operator for query string query: `AND` or `OR`. This parameter can be used only when the `q` query string parameter is specified. */ + /** The default operator for query string query: `AND` or `OR`. + * This parameter can be used only when the `q` query string parameter is specified. */ default_operator?: QueryDslOperator - /** The field to use as default where no field prefix is given in the query string. This parameter can be used only when the `q` query string parameter is specified. */ + /** The field to use as default where no field prefix is given in the query string. + * This parameter can be used only when the `q` query string parameter is specified. */ df?: string - /** The type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. It supports comma-separated values, such as `open,hidden`. */ + /** The type of index that wildcard patterns can match. 
+ * If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. + * It supports comma-separated values, such as `open,hidden`. */ expand_wildcards?: ExpandWildcards - /** Starting offset (default: 0) */ + /** Skips the specified number of documents. */ from?: long /** If `false`, the request returns an error if it targets a missing or closed index. */ ignore_unavailable?: boolean - /** If `true`, format-based query failures (such as providing text to a numeric field) in the query string will be ignored. This parameter can be used only when the `q` query string parameter is specified. */ + /** If `true`, format-based query failures (such as providing text to a numeric field) in the query string will be ignored. + * This parameter can be used only when the `q` query string parameter is specified. */ lenient?: boolean - /** The node or shard the operation should be performed on. It is random by default. */ + /** The node or shard the operation should be performed on. + * It is random by default. */ preference?: string - /** If `true`, Elasticsearch refreshes all shards involved in the delete by query after the request completes. This is different than the delete API's `refresh` parameter, which causes just the shard that received the delete request to be refreshed. Unlike the delete API, it does not support `wait_for`. */ + /** If `true`, Elasticsearch refreshes all shards involved in the delete by query after the request completes. + * This is different than the delete API's `refresh` parameter, which causes just the shard that received the delete request to be refreshed. + * Unlike the delete API, it does not support `wait_for`. */ refresh?: boolean - /** If `true`, the request cache is used for this request. Defaults to the index-level setting. */ + /** If `true`, the request cache is used for this request. + * Defaults to the index-level setting. */ request_cache?: boolean /** The throttle for this request in sub-requests per second. */ requests_per_second?: float @@ -305,9 +437,11 @@ export interface DeleteByQueryRequest extends RequestBase { scroll?: Duration /** The size of the scroll request that powers the operation. */ scroll_size?: long - /** The explicit timeout for each search request. It defaults to no timeout. */ + /** The explicit timeout for each search request. + * It defaults to no timeout. */ search_timeout?: Duration - /** The type of the search operation. Available options include `query_then_fetch` and `dfs_query_then_fetch`. */ + /** The type of the search operation. + * Available options include `query_then_fetch` and `dfs_query_then_fetch`. */ search_type?: SearchType /** The number of slices this task should be divided into. */ slices?: Slices @@ -315,15 +449,25 @@ export interface DeleteByQueryRequest extends RequestBase { sort?: string[] /** The specific `tag` of the request for logging and statistical purposes. */ stats?: string[] - /** The maximum number of documents to collect for each shard. If a query reaches this limit, Elasticsearch terminates the query early. Elasticsearch collects documents before sorting. Use with caution. Elasticsearch applies this parameter to each shard handling the request. When possible, let Elasticsearch perform early termination automatically. Avoid specifying this parameter for requests that target data streams with backing indices across multiple data tiers. */ + /** The maximum number of documents to collect for each shard. 
+ * If a query reaches this limit, Elasticsearch terminates the query early. + * Elasticsearch collects documents before sorting. + * + * Use with caution. + * Elasticsearch applies this parameter to each shard handling the request. + * When possible, let Elasticsearch perform early termination automatically. + * Avoid specifying this parameter for requests that target data streams with backing indices across multiple data tiers. */ terminate_after?: long /** The period each deletion request waits for active shards. */ timeout?: Duration /** If `true`, returns the document version as part of a hit. */ version?: boolean - /** The number of shard copies that must be active before proceeding with the operation. Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). The `timeout` value controls how long each write request waits for unavailable shards to become available. */ + /** The number of shard copies that must be active before proceeding with the operation. + * Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). + * The `timeout` value controls how long each write request waits for unavailable shards to become available. */ wait_for_active_shards?: WaitForActiveShards - /** If `true`, the request blocks until the operation is complete. If `false`, Elasticsearch performs some preflight checks, launches the request, and returns a task you can use to cancel or get the status of the task. Elasticsearch creates a record of this task as a document at `.tasks/task/${taskId}`. When you are done with a task, you should delete the task document so Elasticsearch can reclaim the space. */ + /** If `true`, the request blocks until the operation is complete. + * If `false`, Elasticsearch performs some preflight checks, launches the request, and returns a task you can use to cancel or get the status of the task. Elasticsearch creates a record of this task as a document at `.tasks/task/${taskId}`. When you are done with a task, you should delete the task document so Elasticsearch can reclaim the space. */ wait_for_completion?: boolean /** The maximum number of documents to delete. */ max_docs?: long @@ -338,28 +482,48 @@ export interface DeleteByQueryRequest extends RequestBase { } export interface DeleteByQueryResponse { + /** The number of scroll responses pulled back by the delete by query. */ batches?: long + /** The number of documents that were successfully deleted. */ deleted?: long + /** An array of failures if there were any unrecoverable errors during the process. + * If this array is not empty, the request ended abnormally because of those failures. + * Delete by query is implemented using batches and any failures cause the entire process to end but all failures in the current batch are collected into the array. + * You can use the `conflicts` option to prevent reindex from ending on version conflicts. */ failures?: BulkIndexByScrollFailure[] + /** This field is always equal to zero for delete by query. + * It exists only so that delete by query, update by query, and reindex APIs return responses with the same structure. */ noops?: long + /** The number of requests per second effectively run during the delete by query. */ requests_per_second?: float + /** The number of retries attempted by delete by query. + * `bulk` is the number of bulk actions retried. + * `search` is the number of search actions retried. 
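The long list of `DeleteByQueryRequest` parameters is easier to read next to a call; a sketch that removes old documents and tolerates version conflicts, with an assumed index name and timestamp field:

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'https://localhost:9200/' }) // illustrative node URL

const result = await client.deleteByQuery({
  index: 'my-index',
  conflicts: 'proceed', // don't abort the whole operation on version conflicts
  query: { range: { '@timestamp': { lt: 'now-30d' } } }
})

// `deleted`, `version_conflicts`, and `took` are described in DeleteByQueryResponse.
console.log(result.deleted, result.version_conflicts, result.took)
```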
*/ retries?: Retries slice_id?: integer task?: TaskId throttled?: Duration + /** The number of milliseconds the request slept to conform to `requests_per_second`. */ throttled_millis?: DurationValue throttled_until?: Duration + /** This field should always be equal to zero in a `_delete_by_query` response. + * It has meaning only when using the task API, where it indicates the next time (in milliseconds since epoch) a throttled request will be run again in order to conform to `requests_per_second`. */ throttled_until_millis?: DurationValue + /** If `true`, some requests run during the delete by query operation timed out. */ timed_out?: boolean + /** The number of milliseconds from start to end of the whole operation. */ took?: DurationValue + /** The number of documents that were successfully processed. */ total?: long + /** The number of version conflicts that the delete by query hit. */ version_conflicts?: long } export interface DeleteByQueryRethrottleRequest extends RequestBase { -/** The ID for the task. */ + /** The ID for the task. */ task_id: TaskId - /** The throttle for this request in sub-requests per second. To disable throttling, set it to `-1`. */ + /** The throttle for this request in sub-requests per second. + * To disable throttling, set it to `-1`. */ requests_per_second?: float /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { task_id?: never, requests_per_second?: never } @@ -370,11 +534,15 @@ export interface DeleteByQueryRethrottleRequest extends RequestBase { export type DeleteByQueryRethrottleResponse = TasksTaskListResponseBase export interface DeleteScriptRequest extends RequestBase { -/** The identifier for the stored script or search template. */ + /** The identifier for the stored script or search template. */ id: Id - /** The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. It can also be set to `-1` to indicate that the request should never timeout. */ + /** The period to wait for a connection to the master node. + * If no response is received before the timeout expires, the request fails and returns an error. + * It can also be set to `-1` to indicate that the request should never timeout. */ master_timeout?: Duration - /** The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. It can also be set to `-1` to indicate that the request should never timeout. */ + /** The period to wait for a response. + * If no response is received before the timeout expires, the request fails and returns an error. + * It can also be set to `-1` to indicate that the request should never timeout. */ timeout?: Duration /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { id?: never, master_timeout?: never, timeout?: never } @@ -385,27 +553,43 @@ export interface DeleteScriptRequest extends RequestBase { export type DeleteScriptResponse = AcknowledgedResponseBase export interface ExistsRequest extends RequestBase { -/** A unique document identifier. */ + /** A unique document identifier. */ id: Id - /** A comma-separated list of data streams, indices, and aliases. It supports wildcards (`*`). */ + /** A comma-separated list of data streams, indices, and aliases. + * It supports wildcards (`*`). */ index: IndexName - /** The node or shard the operation should be performed on. 
By default, the operation is randomized between the shard replicas. If it is set to `_local`, the operation will prefer to be run on a local allocated shard when possible. If it is set to a custom value, the value is used to guarantee that the same shards will be used for the same custom value. This can help with "jumping values" when hitting different shards in different refresh states. A sample value can be something like the web session ID or the user name. */ + /** The node or shard the operation should be performed on. + * By default, the operation is randomized between the shard replicas. + * + * If it is set to `_local`, the operation will prefer to be run on a local allocated shard when possible. + * If it is set to a custom value, the value is used to guarantee that the same shards will be used for the same custom value. + * This can help with "jumping values" when hitting different shards in different refresh states. + * A sample value can be something like the web session ID or the user name. */ preference?: string /** If `true`, the request is real-time as opposed to near-real-time. */ realtime?: boolean - /** If `true`, the request refreshes the relevant shards before retrieving the document. Setting it to `true` should be done after careful thought and verification that this does not cause a heavy load on the system (and slow down indexing). */ + /** If `true`, the request refreshes the relevant shards before retrieving the document. + * Setting it to `true` should be done after careful thought and verification that this does not cause a heavy load on the system (and slow down indexing). */ refresh?: boolean /** A custom value used to route operations to a specific shard. */ routing?: Routing /** Indicates whether to return the `_source` field (`true` or `false`) or lists the fields to return. */ _source?: SearchSourceConfigParam - /** A comma-separated list of source fields to exclude from the response. You can also use this parameter to exclude fields from the subset specified in `_source_includes` query parameter. If the `_source` parameter is `false`, this parameter is ignored. */ + /** A comma-separated list of source fields to exclude from the response. + * You can also use this parameter to exclude fields from the subset specified in `_source_includes` query parameter. + * If the `_source` parameter is `false`, this parameter is ignored. */ _source_excludes?: Fields - /** A comma-separated list of source fields to include in the response. If this parameter is specified, only these source fields are returned. You can exclude fields from this subset using the `_source_excludes` query parameter. If the `_source` parameter is `false`, this parameter is ignored. */ + /** A comma-separated list of source fields to include in the response. + * If this parameter is specified, only these source fields are returned. + * You can exclude fields from this subset using the `_source_excludes` query parameter. + * If the `_source` parameter is `false`, this parameter is ignored. */ _source_includes?: Fields - /** A comma-separated list of stored fields to return as part of a hit. If no fields are specified, no stored fields are included in the response. If this field is specified, the `_source` parameter defaults to `false`. */ + /** A comma-separated list of stored fields to return as part of a hit. + * If no fields are specified, no stored fields are included in the response. + * If this field is specified, the `_source` parameter defaults to `false`. 
*/ stored_fields?: Fields - /** Explicit version number for concurrency control. The specified version must match the current version of the document for the request to succeed. */ + /** Explicit version number for concurrency control. + * The specified version must match the current version of the document for the request to succeed. */ version?: VersionNumber /** The version type. */ version_type?: VersionType @@ -418,15 +602,18 @@ export interface ExistsRequest extends RequestBase { export type ExistsResponse = boolean export interface ExistsSourceRequest extends RequestBase { -/** A unique identifier for the document. */ + /** A unique identifier for the document. */ id: Id - /** A comma-separated list of data streams, indices, and aliases. It supports wildcards (`*`). */ + /** A comma-separated list of data streams, indices, and aliases. + * It supports wildcards (`*`). */ index: IndexName - /** The node or shard the operation should be performed on. By default, the operation is randomized between the shard replicas. */ + /** The node or shard the operation should be performed on. + * By default, the operation is randomized between the shard replicas. */ preference?: string /** If `true`, the request is real-time as opposed to near-real-time. */ realtime?: boolean - /** If `true`, the request refreshes the relevant shards before retrieving the document. Setting it to `true` should be done after careful thought and verification that this does not cause a heavy load on the system (and slow down indexing). */ + /** If `true`, the request refreshes the relevant shards before retrieving the document. + * Setting it to `true` should be done after careful thought and verification that this does not cause a heavy load on the system (and slow down indexing). */ refresh?: boolean /** A custom value used to route operations to a specific shard. */ routing?: Routing @@ -436,7 +623,8 @@ export interface ExistsSourceRequest extends RequestBase { _source_excludes?: Fields /** A comma-separated list of source fields to include in the response. */ _source_includes?: Fields - /** The version number for concurrency control. It must match the current version of the document for the request to succeed. */ + /** The version number for concurrency control. + * It must match the current version of the document for the request to succeed. */ version?: VersionNumber /** The version type. */ version_type?: VersionType @@ -461,29 +649,41 @@ export interface ExplainExplanationDetail { } export interface ExplainRequest extends RequestBase { -/** The document identifier. */ + /** The document identifier. */ id: Id - /** Index names that are used to limit the request. Only a single index name can be provided to this parameter. */ + /** Index names that are used to limit the request. + * Only a single index name can be provided to this parameter. */ index: IndexName - /** The analyzer to use for the query string. This parameter can be used only when the `q` query string parameter is specified. */ + /** The analyzer to use for the query string. + * This parameter can be used only when the `q` query string parameter is specified. */ analyzer?: string - /** If `true`, wildcard and prefix queries are analyzed. This parameter can be used only when the `q` query string parameter is specified. */ + /** If `true`, wildcard and prefix queries are analyzed. + * This parameter can be used only when the `q` query string parameter is specified. 
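Because `ExistsResponse` is a plain boolean, the corresponding calls are one-liners; a brief sketch with placeholder index and document IDs:

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'https://localhost:9200/' }) // illustrative node URL

// HEAD my-index/_doc/doc-1: true if the document exists.
const hasDoc = await client.exists({ index: 'my-index', id: 'doc-1' })

// HEAD my-index/_source/doc-1: true only if the document _source is also retrievable.
const hasSource = await client.existsSource({ index: 'my-index', id: 'doc-1' })

console.log(hasDoc, hasSource)
```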
*/ analyze_wildcard?: boolean - /** The default operator for query string query: `AND` or `OR`. This parameter can be used only when the `q` query string parameter is specified. */ + /** The default operator for query string query: `AND` or `OR`. + * This parameter can be used only when the `q` query string parameter is specified. */ default_operator?: QueryDslOperator - /** The field to use as default where no field prefix is given in the query string. This parameter can be used only when the `q` query string parameter is specified. */ + /** The field to use as default where no field prefix is given in the query string. + * This parameter can be used only when the `q` query string parameter is specified. */ df?: string - /** If `true`, format-based query failures (such as providing text to a numeric field) in the query string will be ignored. This parameter can be used only when the `q` query string parameter is specified. */ + /** If `true`, format-based query failures (such as providing text to a numeric field) in the query string will be ignored. + * This parameter can be used only when the `q` query string parameter is specified. */ lenient?: boolean - /** The node or shard the operation should be performed on. It is random by default. */ + /** The node or shard the operation should be performed on. + * It is random by default. */ preference?: string /** A custom value used to route operations to a specific shard. */ routing?: Routing /** `True` or `false` to return the `_source` field or not or a list of fields to return. */ _source?: SearchSourceConfigParam - /** A comma-separated list of source fields to exclude from the response. You can also use this parameter to exclude fields from the subset specified in `_source_includes` query parameter. If the `_source` parameter is `false`, this parameter is ignored. */ + /** A comma-separated list of source fields to exclude from the response. + * You can also use this parameter to exclude fields from the subset specified in `_source_includes` query parameter. + * If the `_source` parameter is `false`, this parameter is ignored. */ _source_excludes?: Fields - /** A comma-separated list of source fields to include in the response. If this parameter is specified, only these source fields are returned. You can exclude fields from this subset using the `_source_excludes` query parameter. If the `_source` parameter is `false`, this parameter is ignored. */ + /** A comma-separated list of source fields to include in the response. + * If this parameter is specified, only these source fields are returned. + * You can exclude fields from this subset using the `_source_excludes` query parameter. + * If the `_source` parameter is `false`, this parameter is ignored. */ _source_includes?: Fields /** A comma-separated list of stored fields to return in the response. */ stored_fields?: Fields @@ -506,24 +706,44 @@ export interface ExplainResponse { } export interface FieldCapsFieldCapability { + /** Whether this field can be aggregated on all indices. */ aggregatable: boolean + /** The list of indices where this field has the same type family, or null if all indices have the same type family for the field. */ indices?: Indices + /** Merged metadata across all indices as a map of string keys to arrays of values. A value length of 1 indicates that all indices had the same value for this key, while a length of 2 or more indicates that not all indices had the same value for this key. 
*/ meta?: Metadata + /** The list of indices where this field is not aggregatable, or null if all indices have the same definition for the field. */ non_aggregatable_indices?: Indices + /** The list of indices where this field is not searchable, or null if all indices have the same definition for the field. */ non_searchable_indices?: Indices + /** Whether this field is indexed for search on all indices. */ searchable: boolean type: string + /** Whether this field is registered as a metadata field. */ metadata_field?: boolean + /** Whether this field is used as a time series dimension. + * @experimental */ time_series_dimension?: boolean + /** Contains the metric type if this field is used as a time series + * metric, absent if the field is not used as a metric. + * @experimental */ time_series_metric?: MappingTimeSeriesMetricType + /** If this list is present in response then some indices have the + * field marked as a dimension and other indices, the ones in this list, do not. + * @experimental */ non_dimension_indices?: IndexName[] + /** The list of indices where this field is present if these indices + * don’t have the same `time_series_metric` value for this field. + * @experimental */ metric_conflicts_indices?: IndexName[] } export interface FieldCapsRequest extends RequestBase { -/** A comma-separated list of data streams, indices, and aliases used to limit the request. Supports wildcards (*). To target all data streams and indices, omit this parameter or use * or _all. */ + /** A comma-separated list of data streams, indices, and aliases used to limit the request. Supports wildcards (*). To target all data streams and indices, omit this parameter or use * or _all. */ index?: Indices - /** If false, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting `foo*,bar*` returns an error if an index starts with foo but no index starts with bar. */ + /** If false, the request returns an error if any wildcard expression, index alias, + * or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request + * targeting `foo*,bar*` returns an error if an index starts with foo but no index starts with bar. */ allow_no_indices?: boolean /** The type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such as `open,hidden`. */ expand_wildcards?: ExpandWildcards @@ -533,15 +753,22 @@ include_unmapped?: boolean /** A comma-separated list of filters to apply to the response. */ filters?: string - /** A comma-separated list of field types to include. Any fields that do not match one of these types will be excluded from the results. It defaults to empty, meaning that all field types are returned. */ + /** A comma-separated list of field types to include. + * Any fields that do not match one of these types will be excluded from the results. + * It defaults to empty, meaning that all field types are returned. */ types?: string[] /** If false, empty fields are not included in the response. */ include_empty_fields?: boolean /** A list of fields to retrieve capabilities for. Wildcard (`*`) expressions are supported.
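A sketch of a field capabilities call and how the per-field flags in `FieldCapsFieldCapability` come back; the index pattern and field names are assumptions:

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'https://localhost:9200/' }) // illustrative node URL

const caps = await client.fieldCaps({
  index: 'logs-*',
  fields: ['@timestamp', 'message'],
  types: ['date', 'text', 'keyword'] // optional filter on field types
})

// Capabilities are grouped per field and then per field type.
for (const [field, byType] of Object.entries(caps.fields)) {
  for (const [type, cap] of Object.entries(byType)) {
    console.log(field, type, cap.searchable, cap.aggregatable)
  }
}
```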
*/ fields?: Fields - /** Filter indices if the provided query rewrites to `match_none` on every shard. IMPORTANT: The filtering is done on a best-effort basis, it uses index statistics and mappings to rewrite queries to `match_none` instead of fully running the request. For instance a range query over a date field can rewrite to `match_none` if all documents within a shard (including deleted documents) are outside of the provided range. However, not all queries can rewrite to `match_none` so this API may return an index even if the provided filter matches no document. */ + /** Filter indices if the provided query rewrites to `match_none` on every shard. + * + * IMPORTANT: The filtering is done on a best-effort basis, it uses index statistics and mappings to rewrite queries to `match_none` instead of fully running the request. + * For instance a range query over a date field can rewrite to `match_none` if all documents within a shard (including deleted documents) are outside of the provided range. + * However, not all queries can rewrite to `match_none` so this API may return an index even if the provided filter matches no document. */ index_filter?: QueryDslQueryContainer - /** Define ad-hoc runtime fields in the request similar to the way it is done in search requests. These fields exist only as part of the query and take precedence over fields defined with the same name in the index mappings. */ + /** Define ad-hoc runtime fields in the request similar to the way it is done in search requests. + * These fields exist only as part of the query and take precedence over fields defined with the same name in the index mappings. */ runtime_mappings?: MappingRuntimeFields /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { index?: never, allow_no_indices?: never, expand_wildcards?: never, ignore_unavailable?: never, include_unmapped?: never, filters?: never, types?: never, include_empty_fields?: never, fields?: never, index_filter?: never, runtime_mappings?: never } @@ -550,47 +777,78 @@ } export interface FieldCapsResponse { + /** The list of indices where this field has the same type family, or null if all indices have the same type family for the field. */ indices: Indices fields: Record> } export interface GetGetResult { + /** The name of the index the document belongs to. */ _index: IndexName + /** If the `stored_fields` parameter is set to `true` and `found` is `true`, it contains the document fields stored in the index. */ fields?: Record _ignored?: string[] + /** Indicates whether the document exists. */ found: boolean + /** The unique identifier for the document. */ _id: Id + /** The primary term assigned to the document for the indexing operation. */ _primary_term?: long + /** The explicit routing, if set. */ _routing?: string + /** The sequence number assigned to the document for the indexing operation. + * Sequence numbers are used to ensure an older version of a document doesn't overwrite a newer version. */ _seq_no?: SequenceNumber + /** If `found` is `true`, it contains the document data formatted in JSON. + * If the `_source` parameter is set to `false` or the `stored_fields` parameter is set to `true`, it is excluded. */ _source?: TDocument + /** The document version, which is incremented each time the document is updated. */ _version?: VersionNumber } export interface GetRequest extends RequestBase { -/** A unique document identifier.
*/ + /** A unique document identifier. */ id: Id /** The name of the index that contains the document. */ index: IndexName - /** Indicates whether the request forces synthetic `_source`. Use this paramater to test if the mapping supports synthetic `_source` and to get a sense of the worst case performance. Fetches with this parameter enabled will be slower than enabling synthetic source natively in the index. */ + /** Indicates whether the request forces synthetic `_source`. + * Use this parameter to test if the mapping supports synthetic `_source` and to get a sense of the worst case performance. + * Fetches with this parameter enabled will be slower than enabling synthetic source natively in the index. */ force_synthetic_source?: boolean - /** The node or shard the operation should be performed on. By default, the operation is randomized between the shard replicas. If it is set to `_local`, the operation will prefer to be run on a local allocated shard when possible. If it is set to a custom value, the value is used to guarantee that the same shards will be used for the same custom value. This can help with "jumping values" when hitting different shards in different refresh states. A sample value can be something like the web session ID or the user name. */ + /** The node or shard the operation should be performed on. + * By default, the operation is randomized between the shard replicas. + * + * If it is set to `_local`, the operation will prefer to be run on a local allocated shard when possible. + * If it is set to a custom value, the value is used to guarantee that the same shards will be used for the same custom value. + * This can help with "jumping values" when hitting different shards in different refresh states. + * A sample value can be something like the web session ID or the user name. */ preference?: string /** If `true`, the request is real-time as opposed to near-real-time. */ realtime?: boolean - /** If `true`, the request refreshes the relevant shards before retrieving the document. Setting it to `true` should be done after careful thought and verification that this does not cause a heavy load on the system (and slow down indexing). */ + /** If `true`, the request refreshes the relevant shards before retrieving the document. + * Setting it to `true` should be done after careful thought and verification that this does not cause a heavy load on the system (and slow down indexing). */ refresh?: boolean /** A custom value used to route operations to a specific shard. */ routing?: Routing /** Indicates whether to return the `_source` field (`true` or `false`) or lists the fields to return. */ _source?: SearchSourceConfigParam - /** A comma-separated list of source fields to exclude from the response. You can also use this parameter to exclude fields from the subset specified in `_source_includes` query parameter. If the `_source` parameter is `false`, this parameter is ignored. */ + /** A comma-separated list of source fields to exclude from the response. + * You can also use this parameter to exclude fields from the subset specified in `_source_includes` query parameter. + * If the `_source` parameter is `false`, this parameter is ignored. */ _source_excludes?: Fields - /** A comma-separated list of source fields to include in the response. If this parameter is specified, only these source fields are returned. You can exclude fields from this subset using the `_source_excludes` query parameter. If the `_source` parameter is `false`, this parameter is ignored.
*/ + /** A comma-separated list of source fields to include in the response. + * If this parameter is specified, only these source fields are returned. + * You can exclude fields from this subset using the `_source_excludes` query parameter. + * If the `_source` parameter is `false`, this parameter is ignored. */ _source_includes?: Fields - /** A comma-separated list of stored fields to return as part of a hit. If no fields are specified, no stored fields are included in the response. If this field is specified, the `_source` parameter defaults to `false`. Only leaf fields can be retrieved with the `stored_field` option. Object fields can't be returned;if specified, the request fails. */ + /** A comma-separated list of stored fields to return as part of a hit. + * If no fields are specified, no stored fields are included in the response. + * If this field is specified, the `_source` parameter defaults to `false`. + * Only leaf fields can be retrieved with the `stored_field` option. + * Object fields can't be returned;if specified, the request fails. */ stored_fields?: Fields - /** The version number for concurrency control. It must match the current version of the document for the request to succeed. */ + /** The version number for concurrency control. + * It must match the current version of the document for the request to succeed. */ version?: VersionNumber /** The version type. */ version_type?: VersionType @@ -603,9 +861,11 @@ export interface GetRequest extends RequestBase { export type GetResponse = GetGetResult export interface GetScriptRequest extends RequestBase { -/** The identifier for the stored script or search template. */ + /** The identifier for the stored script or search template. */ id: Id - /** The period to wait for the master node. If the master node is not available before the timeout expires, the request fails and returns an error. It can also be set to `-1` to indicate that the request should never timeout. */ + /** The period to wait for the master node. + * If the master node is not available before the timeout expires, the request fails and returns an error. + * It can also be set to `-1` to indicate that the request should never timeout. */ master_timeout?: Duration /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { id?: never, master_timeout?: never } @@ -664,15 +924,17 @@ export interface GetScriptLanguagesResponse { } export interface GetSourceRequest extends RequestBase { -/** A unique document identifier. */ + /** A unique document identifier. */ id: Id /** The name of the index that contains the document. */ index: IndexName - /** The node or shard the operation should be performed on. By default, the operation is randomized between the shard replicas. */ + /** The node or shard the operation should be performed on. + * By default, the operation is randomized between the shard replicas. */ preference?: string /** If `true`, the request is real-time as opposed to near-real-time. */ realtime?: boolean - /** If `true`, the request refreshes the relevant shards before retrieving the document. Setting it to `true` should be done after careful thought and verification that this does not cause a heavy load on the system (and slow down indexing). */ + /** If `true`, the request refreshes the relevant shards before retrieving the document. + * Setting it to `true` should be done after careful thought and verification that this does not cause a heavy load on the system (and slow down indexing). 
*/ refresh?: boolean /** A custom value used to route operations to a specific shard. */ routing?: Routing @@ -684,7 +946,8 @@ export interface GetSourceRequest extends RequestBase { _source_includes?: Fields /** A comma-separated list of stored fields to return as part of a hit. */ stored_fields?: Fields - /** The version number for concurrency control. It must match the current version of the document for the request to succeed. */ + /** The version number for concurrency control. + * It must match the current version of the document for the request to succeed. */ version?: VersionNumber /** The version type. */ version_type?: VersionType @@ -821,7 +1084,7 @@ export interface HealthReportRepositoryIntegrityIndicatorDetails { } export interface HealthReportRequest extends RequestBase { -/** A feature of the cluster, as returned by the top-level health report API. */ + /** A feature of the cluster, as returned by the top-level health report API. */ feature?: string | string[] /** Explicit operation timeout. */ timeout?: Duration @@ -894,9 +1157,13 @@ export interface HealthReportStagnatingBackingIndices { } export interface IndexRequest extends RequestBase { -/** A unique identifier for the document. To automatically generate a document ID, use the `POST //_doc/` request format and omit this parameter. */ + /** A unique identifier for the document. + * To automatically generate a document ID, use the `POST //_doc/` request format and omit this parameter. */ id?: Id - /** The name of the data stream or index to target. If the target doesn't exist and matches the name or wildcard (`*`) pattern of an index template with a `data_stream` definition, this request creates the data stream. If the target doesn't exist and doesn't match a data stream template, this request creates the index. You can check for existing targets with the resolve index API. */ + /** The name of the data stream or index to target. + * If the target doesn't exist and matches the name or wildcard (`*`) pattern of an index template with a `data_stream` definition, this request creates the data stream. + * If the target doesn't exist and doesn't match a data stream template, this request creates the index. + * You can check for existing targets with the resolve index API. */ index: IndexName /** Only perform the operation if the document has this primary term. */ if_primary_term?: long @@ -904,21 +1171,38 @@ export interface IndexRequest extends RequestBase { if_seq_no?: SequenceNumber /** True or false if to include the document source in the error message in case of parsing errors. */ include_source_on_error?: boolean - /** Set to `create` to only index the document if it does not already exist (put if absent). If a document with the specified `_id` already exists, the indexing operation will fail. The behavior is the same as using the `/_create` endpoint. If a document ID is specified, this paramater defaults to `index`. Otherwise, it defaults to `create`. If the request targets a data stream, an `op_type` of `create` is required. */ + /** Set to `create` to only index the document if it does not already exist (put if absent). + * If a document with the specified `_id` already exists, the indexing operation will fail. + * The behavior is the same as using the `/_create` endpoint. + * If a document ID is specified, this paramater defaults to `index`. + * Otherwise, it defaults to `create`. + * If the request targets a data stream, an `op_type` of `create` is required. 
*/ op_type?: OpType - /** The ID of the pipeline to use to preprocess incoming documents. If the index has a default ingest pipeline specified, then setting the value to `_none` disables the default ingest pipeline for this request. If a final pipeline is configured it will always run, regardless of the value of this parameter. */ + /** The ID of the pipeline to use to preprocess incoming documents. + * If the index has a default ingest pipeline specified, then setting the value to `_none` disables the default ingest pipeline for this request. + * If a final pipeline is configured it will always run, regardless of the value of this parameter. */ pipeline?: string - /** If `true`, Elasticsearch refreshes the affected shards to make this operation visible to search. If `wait_for`, it waits for a refresh to make this operation visible to search. If `false`, it does nothing with refreshes. */ + /** If `true`, Elasticsearch refreshes the affected shards to make this operation visible to search. + * If `wait_for`, it waits for a refresh to make this operation visible to search. + * If `false`, it does nothing with refreshes. */ refresh?: Refresh /** A custom value that is used to route operations to a specific shard. */ routing?: Routing - /** The period the request waits for the following operations: automatic index creation, dynamic mapping updates, waiting for active shards. This parameter is useful for situations where the primary shard assigned to perform the operation might not be available when the operation runs. Some reasons for this might be that the primary shard is currently recovering from a gateway or undergoing relocation. By default, the operation will wait on the primary shard to become available for at least 1 minute before failing and responding with an error. The actual wait time could be longer, particularly when multiple waits occur. */ + /** The period the request waits for the following operations: automatic index creation, dynamic mapping updates, waiting for active shards. + * + * This parameter is useful for situations where the primary shard assigned to perform the operation might not be available when the operation runs. + * Some reasons for this might be that the primary shard is currently recovering from a gateway or undergoing relocation. + * By default, the operation will wait on the primary shard to become available for at least 1 minute before failing and responding with an error. + * The actual wait time could be longer, particularly when multiple waits occur. */ timeout?: Duration - /** An explicit version number for concurrency control. It must be a non-negative long number. */ + /** An explicit version number for concurrency control. + * It must be a non-negative long number. */ version?: VersionNumber /** The version type. */ version_type?: VersionType - /** The number of shard copies that must be active before proceeding with the operation. You can set it to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). The default value of `1` means it waits for each primary shard to be active. */ + /** The number of shard copies that must be active before proceeding with the operation. + * You can set it to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). + * The default value of `1` means it waits for each primary shard to be active. */ wait_for_active_shards?: WaitForActiveShards /** If `true`, the destination must be an index alias. 
*/ require_alias?: boolean @@ -939,52 +1223,16 @@ export interface InfoRequest extends RequestBase { } export interface InfoResponse { + /** The responding cluster's name. */ cluster_name: Name cluster_uuid: Uuid + /** The responding node's name. */ name: Name tagline: string + /** The running version of Elasticsearch. */ version: ElasticsearchVersionInfo } -export interface KnnSearchRequest extends RequestBase { -/** A comma-separated list of index names to search; use `_all` or to perform the operation on all indices. */ - index: Indices - /** A comma-separated list of specific routing values. */ - routing?: Routing - /** Indicates which source fields are returned for matching documents. These fields are returned in the `hits._source` property of the search response. */ - _source?: SearchSourceConfig - /** The request returns doc values for field names matching these patterns in the `hits.fields` property of the response. It accepts wildcard (`*`) patterns. */ - docvalue_fields?: (QueryDslFieldAndFormat | Field)[] - /** A list of stored fields to return as part of a hit. If no fields are specified, no stored fields are included in the response. If this field is specified, the `_source` parameter defaults to `false`. You can pass `_source: true` to return both source fields and stored fields in the search response. */ - stored_fields?: Fields - /** The request returns values for field names matching these patterns in the `hits.fields` property of the response. It accepts wildcard (`*`) patterns. */ - fields?: Fields - /** A query to filter the documents that can match. The kNN search will return the top `k` documents that also match this filter. The value can be a single query or a list of queries. If `filter` isn't provided, all documents are allowed to match. */ - filter?: QueryDslQueryContainer | QueryDslQueryContainer[] - /** The kNN query to run. */ - knn: KnnSearchQuery - /** All values in `body` will be added to the request body. */ - body?: string | { [key: string]: any } & { index?: never, routing?: never, _source?: never, docvalue_fields?: never, stored_fields?: never, fields?: never, filter?: never, knn?: never } - /** All values in `querystring` will be added to the request querystring. */ - querystring?: { [key: string]: any } & { index?: never, routing?: never, _source?: never, docvalue_fields?: never, stored_fields?: never, fields?: never, filter?: never, knn?: never } -} - -export interface KnnSearchResponse { - took: long - timed_out: boolean - _shards: ShardStatistics - hits: SearchHitsMetadata - fields?: Record - max_score?: double -} - -export interface KnnSearchQuery { - field: Field - query_vector: QueryVector - k: integer - num_candidates: integer -} - export interface MgetMultiGetError { error: ErrorCause _id: Id @@ -992,19 +1240,26 @@ export interface MgetMultiGetError { } export interface MgetOperation { + /** The unique document ID. */ _id: Id + /** The index that contains the document. */ _index?: IndexName + /** The key for the primary shard the document resides on. Required if routing is used during indexing. */ routing?: Routing + /** If `false`, excludes all _source fields. */ _source?: SearchSourceConfig + /** The stored fields you want to retrieve. */ stored_fields?: Fields version?: VersionNumber version_type?: VersionType } export interface MgetRequest extends RequestBase { -/** Name of the index to retrieve documents from when `ids` are specified, or when a document in the `docs` array does not specify an index. 
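A hedged sketch of how the `IndexRequest` parameters such as `op_type` and `refresh` might be exercised, reusing the illustrative `client` from the get sketch; the index name, ID, and document shape are assumptions:

```ts
// Create-only indexing with an explicit refresh behaviour.
const indexed = await client.index({
  index: 'my-index',
  id: '1',
  op_type: 'create',   // fail if a document with this _id already exists
  refresh: 'wait_for', // wait until the document is visible to search
  document: { title: 'hello', views: 0 }
})

console.log(indexed.result) // e.g. 'created'
```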
*/ + /** Name of the index to retrieve documents from when `ids` are specified, or when a document in the `docs` array does not specify an index. */ index?: IndexName - /** Should this request force synthetic _source? Use this to test if the mapping supports synthetic _source and to get a sense of the worst case performance. Fetches with this enabled will be slower the enabling synthetic source natively in the index. */ + /** Should this request force synthetic _source? + * Use this to test if the mapping supports synthetic _source and to get a sense of the worst case performance. + * Fetches with this enabled will be slower the enabling synthetic source natively in the index. */ force_synthetic_source?: boolean /** Specifies the node or shard the operation should be performed on. Random by default. */ preference?: string @@ -1016,9 +1271,12 @@ export interface MgetRequest extends RequestBase { routing?: Routing /** True or false to return the `_source` field or not, or a list of fields to return. */ _source?: SearchSourceConfigParam - /** A comma-separated list of source fields to exclude from the response. You can also use this parameter to exclude fields from the subset specified in `_source_includes` query parameter. */ + /** A comma-separated list of source fields to exclude from the response. + * You can also use this parameter to exclude fields from the subset specified in `_source_includes` query parameter. */ _source_excludes?: Fields - /** A comma-separated list of source fields to include in the response. If this parameter is specified, only these source fields are returned. You can exclude fields from this subset using the `_source_excludes` query parameter. If the `_source` parameter is `false`, this parameter is ignored. */ + /** A comma-separated list of source fields to include in the response. + * If this parameter is specified, only these source fields are returned. You can exclude fields from this subset using the `_source_excludes` query parameter. + * If the `_source` parameter is `false`, this parameter is ignored. */ _source_includes?: Fields /** If `true`, retrieves the document fields stored in the index rather than the document `_source`. */ stored_fields?: Fields @@ -1033,6 +1291,9 @@ export interface MgetRequest extends RequestBase { } export interface MgetResponse { + /** The response includes a docs array that contains the documents in the order specified in the request. + * The structure of the returned documents is similar to that returned by the get API. + * If there is a failure getting a particular document, the error is included in place of the document. 
*/ docs: MgetResponseItem[] } @@ -1047,41 +1308,6 @@ export interface MsearchMultiSearchResult[] } -export interface MsearchMultisearchBody { - aggregations?: Record - aggs?: Record - collapse?: SearchFieldCollapse - query?: QueryDslQueryContainer - explain?: boolean - ext?: Record - stored_fields?: Fields - docvalue_fields?: (QueryDslFieldAndFormat | Field)[] - knn?: KnnSearch | KnnSearch[] - from?: integer - highlight?: SearchHighlight - indices_boost?: Record[] - min_score?: double - post_filter?: QueryDslQueryContainer - profile?: boolean - rescore?: SearchRescore | SearchRescore[] - script_fields?: Record - search_after?: SortResults - size?: integer - sort?: Sort - _source?: SearchSourceConfig - fields?: (QueryDslFieldAndFormat | Field)[] - terminate_after?: long - stats?: string[] - timeout?: string - track_scores?: boolean - track_total_hits?: SearchTrackHits - version?: boolean - runtime_mappings?: MappingRuntimeFields - seq_no_primary_term?: boolean - pit?: SearchPointInTimeReference - suggest?: SearchSuggester -} - export interface MsearchMultisearchHeader { allow_no_indices?: boolean expand_wildcards?: ExpandWildcards @@ -1097,7 +1323,7 @@ export interface MsearchMultisearchHeader { } export interface MsearchRequest extends RequestBase { -/** Comma-separated list of data streams, indices, and index aliases to search. */ + /** Comma-separated list of data streams, indices, and index aliases to search. */ index?: Indices /** If false, the request returns an error if any wildcard expression, index alias, or _all value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting foo*,bar* returns an error if an index starts with foo but no index starts with bar. */ allow_no_indices?: boolean @@ -1109,12 +1335,18 @@ export interface MsearchRequest extends RequestBase { ignore_throttled?: boolean /** If true, missing or closed indices are not included in the response. */ ignore_unavailable?: boolean - /** Indicates whether hit.matched_queries should be rendered as a map that includes the name of the matched query associated with its score (true) or as an array containing the name of the matched queries (false) This functionality reruns each named query on every hit in a search response. Typically, this adds a small overhead to a request. However, using computationally expensive named queries on a large number of hits may add significant overhead. */ + /** Indicates whether hit.matched_queries should be rendered as a map that includes + * the name of the matched query associated with its score (true) + * or as an array containing the name of the matched queries (false) + * This functionality reruns each named query on every hit in a search response. + * Typically, this adds a small overhead to a request. + * However, using computationally expensive named queries on a large number of hits may add significant overhead. */ include_named_queries_score?: boolean - /** Maximum number of concurrent searches the multi search API can execute. */ - max_concurrent_searches?: long + /** Maximum number of concurrent searches the multi search API can execute. + * Defaults to `max(1, (# of data nodes * min(search thread pool size, 10)))`. */ + max_concurrent_searches?: integer /** Maximum number of concurrent shard requests that each sub-search request executes per node. 
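A minimal sketch of the mget API typed above, reusing the illustrative `client` and `MyDoc`; the IDs and excluded fields are assumptions:

```ts
// Fetch several documents in one round trip and handle per-document errors.
const { docs } = await client.mget<MyDoc>({
  index: 'my-index',
  ids: ['1', '2', '3'],
  _source_excludes: ['views']
})

for (const item of docs) {
  if ('error' in item) {
    console.error(item._id, item.error)        // MgetMultiGetError
  } else if (item.found) {
    console.log(item._id, item._source?.title) // a get-style hit
  }
}
```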
*/ - max_concurrent_shard_requests?: long + max_concurrent_shard_requests?: integer /** Defines a threshold that enforces a pre-filter roundtrip to prefilter search shards based on query rewriting if the number of shards the search request expands to exceeds the threshold. This filter roundtrip can limit the number of shards significantly if for instance a shard can not match any documents based on its rewrite method i.e., if date filters are mandatory to match but the shard bounds and the query are disjoint. */ pre_filter_shard_size?: long /** If true, hits.total are returned as an integer in the response. Defaults to false, which returns an object. */ @@ -1132,14 +1364,16 @@ export interface MsearchRequest extends RequestBase { querystring?: { [key: string]: any } & { index?: never, allow_no_indices?: never, ccs_minimize_roundtrips?: never, expand_wildcards?: never, ignore_throttled?: never, ignore_unavailable?: never, include_named_queries_score?: never, max_concurrent_searches?: never, max_concurrent_shard_requests?: never, pre_filter_shard_size?: never, rest_total_hits_as_int?: never, routing?: never, search_type?: never, typed_keys?: never, searches?: never } } -export type MsearchRequestItem = MsearchMultisearchHeader | MsearchMultisearchBody +export type MsearchRequestItem = MsearchMultisearchHeader | SearchSearchRequestBody export type MsearchResponse> = MsearchMultiSearchResult export type MsearchResponseItem = MsearchMultiSearchItem | ErrorResponseBase export interface MsearchTemplateRequest extends RequestBase { -/** A comma-separated list of data streams, indices, and aliases to search. It supports wildcards (`*`). To search all data streams and indices, omit this parameter or use `*`. */ + /** A comma-separated list of data streams, indices, and aliases to search. + * It supports wildcards (`*`). + * To search all data streams and indices, omit this parameter or use `*`. */ index?: Indices /** If `true`, network round-trips are minimized for cross-cluster search requests. */ ccs_minimize_roundtrips?: boolean @@ -1147,7 +1381,8 @@ export interface MsearchTemplateRequest extends RequestBase { max_concurrent_searches?: long /** The type of the search operation. */ search_type?: SearchType - /** If `true`, the response returns `hits.total` as an integer. If `false`, it returns `hits.total` as an object. */ + /** If `true`, the response returns `hits.total` as an integer. + * If `false`, it returns `hits.total` as an object. */ rest_total_hits_as_int?: boolean /** If `true`, the response prefixes aggregation and suggester names with their respective types. */ typed_keys?: boolean @@ -1163,33 +1398,58 @@ export type MsearchTemplateRequestItem = MsearchMultisearchHeader | MsearchTempl export type MsearchTemplateResponse> = MsearchMultiSearchResult export interface MsearchTemplateTemplateConfig { + /** If `true`, returns detailed information about score calculation as part of each hit. */ explain?: boolean + /** The ID of the search template to use. If no `source` is specified, + * this parameter is required. */ id?: Id + /** Key-value pairs used to replace Mustache variables in the template. + * The key is the variable name. + * The value is the variable value. */ params?: Record + /** If `true`, the query execution is profiled. */ profile?: boolean - source?: string + /** An inline search template. Supports the same parameters as the search API's + * request body. It also supports Mustache variables. If no `id` is specified, this + * parameter is required. 
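A rough sketch of the msearch request shape, where the `searches` array alternates header entries (`MsearchMultisearchHeader`) with search bodies, reusing the illustrative `client`; index names and queries are assumptions:

```ts
// Two searches in a single request.
const multi = await client.msearch({
  index: 'my-index',          // default target for headers that omit an index
  max_concurrent_searches: 2,
  searches: [
    {},                                        // header for the first search
    { query: { match: { title: 'hello' } } },  // body for the first search
    { index: 'other-index' },                  // header for the second search
    { query: { match_all: {} }, size: 5 }      // body for the second search
  ]
})

console.log(multi.responses.length) // one entry (result or error) per search
```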
*/ + source?: ScriptSource } export interface MtermvectorsOperation { + /** The ID of the document. */ _id?: Id + /** The index of the document. */ _index?: IndexName + /** An artificial document (a document not present in the index) for which you want to retrieve term vectors. */ doc?: any + /** Comma-separated list or wildcard expressions of fields to include in the statistics. + * Used as the default list unless a specific field list is provided in the `completion_fields` or `fielddata_fields` parameters. */ fields?: Fields + /** If `true`, the response includes the document count, sum of document frequencies, and sum of total term frequencies. */ field_statistics?: boolean + /** Filter terms based on their tf-idf scores. */ filter?: TermvectorsFilter + /** If `true`, the response includes term offsets. */ offsets?: boolean + /** If `true`, the response includes term payloads. */ payloads?: boolean + /** If `true`, the response includes term positions. */ positions?: boolean + /** Custom value used to route operations to a specific shard. */ routing?: Routing + /** If true, the response includes term frequency and document frequency. */ term_statistics?: boolean + /** If `true`, returns the document version as part of a hit. */ version?: VersionNumber + /** Specific version type. */ version_type?: VersionType } export interface MtermvectorsRequest extends RequestBase { -/** The name of the index that contains the documents. */ + /** The name of the index that contains the documents. */ index?: IndexName - /** A comma-separated list or wildcard expressions of fields to include in the statistics. It is used as the default list unless a specific field list is provided in the `completion_fields` or `fielddata_fields` parameters. */ + /** A comma-separated list or wildcard expressions of fields to include in the statistics. + * It is used as the default list unless a specific field list is provided in the `completion_fields` or `fielddata_fields` parameters. */ fields?: Fields /** If `true`, the response includes the document count, sum of document frequencies, and sum of total term frequencies. */ field_statistics?: boolean @@ -1199,7 +1459,8 @@ export interface MtermvectorsRequest extends RequestBase { payloads?: boolean /** If `true`, the response includes term positions. */ positions?: boolean - /** The node or shard the operation should be performed on. It is random by default. */ + /** The node or shard the operation should be performed on. + * It is random by default. */ preference?: string /** If true, the request is real-time as opposed to near-real-time. */ realtime?: boolean @@ -1236,29 +1497,37 @@ export interface MtermvectorsTermVectorsResult { } export interface OpenPointInTimeRequest extends RequestBase { -/** A comma-separated list of index names to open point in time; use `_all` or empty string to perform the operation on all indices */ + /** A comma-separated list of index names to open point in time; use `_all` or empty string to perform the operation on all indices */ index: Indices /** Extend the length of time that the point in time persists. */ keep_alive: Duration /** If `false`, the request returns an error if it targets a missing or closed index. */ ignore_unavailable?: boolean - /** The node or shard the operation should be performed on. By default, it is random. */ + /** The node or shard the operation should be performed on. + * By default, it is random. */ preference?: string /** A custom value that is used to route operations to a specific shard. 
*/ routing?: Routing - /** The type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. It supports comma-separated values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. */ + /** The type of index that wildcard patterns can match. + * If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. + * It supports comma-separated values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. */ expand_wildcards?: ExpandWildcards - /** Indicates whether the point in time tolerates unavailable shards or shard failures when initially creating the PIT. If `false`, creating a point in time request when a shard is missing or unavailable will throw an exception. If `true`, the point in time will contain all the shards that are available at the time of the request. */ + /** Indicates whether the point in time tolerates unavailable shards or shard failures when initially creating the PIT. + * If `false`, creating a point in time request when a shard is missing or unavailable will throw an exception. + * If `true`, the point in time will contain all the shards that are available at the time of the request. */ allow_partial_search_results?: boolean + /** Maximum number of concurrent shard requests that each sub-search request executes per node. */ + max_concurrent_shard_requests?: integer /** Filter indices if the provided query rewrites to `match_none` on every shard. */ index_filter?: QueryDslQueryContainer /** All values in `body` will be added to the request body. */ - body?: string | { [key: string]: any } & { index?: never, keep_alive?: never, ignore_unavailable?: never, preference?: never, routing?: never, expand_wildcards?: never, allow_partial_search_results?: never, index_filter?: never } + body?: string | { [key: string]: any } & { index?: never, keep_alive?: never, ignore_unavailable?: never, preference?: never, routing?: never, expand_wildcards?: never, allow_partial_search_results?: never, max_concurrent_shard_requests?: never, index_filter?: never } /** All values in `querystring` will be added to the request querystring. */ - querystring?: { [key: string]: any } & { index?: never, keep_alive?: never, ignore_unavailable?: never, preference?: never, routing?: never, expand_wildcards?: never, allow_partial_search_results?: never, index_filter?: never } + querystring?: { [key: string]: any } & { index?: never, keep_alive?: never, ignore_unavailable?: never, preference?: never, routing?: never, expand_wildcards?: never, allow_partial_search_results?: never, max_concurrent_shard_requests?: never, index_filter?: never } } export interface OpenPointInTimeResponse { + /** Shards used to create the PIT */ _shards: ShardStatistics id: Id } @@ -1273,13 +1542,19 @@ export interface PingRequest extends RequestBase { export type PingResponse = boolean export interface PutScriptRequest extends RequestBase { -/** The identifier for the stored script or search template. It must be unique within the cluster. */ + /** The identifier for the stored script or search template. + * It must be unique within the cluster. */ id: Id - /** The context in which the script or search template should run. To prevent errors, the API immediately compiles the script or template in this context. */ + /** The context in which the script or search template should run. 
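A minimal sketch of the open point in time flow typed above, reusing the illustrative `client` and `MyDoc`; the index name, `keep_alive`, and sort tiebreaker are assumptions:

```ts
// Open a PIT, search against it, then close it.
const pit = await client.openPointInTime({ index: 'my-index', keep_alive: '1m' })

const page = await client.search<MyDoc>({
  pit: { id: pit.id, keep_alive: '1m' },
  size: 100,
  sort: ['_shard_doc']
})
console.log(page.hits.hits.length)

await client.closePointInTime({ id: pit.id })
```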
+ * To prevent errors, the API immediately compiles the script or template in this context. */ context?: Name - /** The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. It can also be set to `-1` to indicate that the request should never timeout. */ + /** The period to wait for a connection to the master node. + * If no response is received before the timeout expires, the request fails and returns an error. + * It can also be set to `-1` to indicate that the request should never timeout. */ master_timeout?: Duration - /** The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. It can also be set to `-1` to indicate that the request should never timeout. */ + /** The period to wait for a response. + * If no response is received before the timeout expires, the request fails and returns an error. + * It can also be set to `-1` to indicate that the request should never timeout. */ timeout?: Duration /** The script or search template, its parameters, and its language. */ script: StoredScript @@ -1292,8 +1567,11 @@ export interface PutScriptRequest extends RequestBase { export type PutScriptResponse = AcknowledgedResponseBase export interface RankEvalDocumentRating { + /** The document ID. */ _id: Id + /** The document’s index. For data streams, this should be the document’s backing index. */ _index: IndexName + /** The document’s relevance with regard to this search request. */ rating: integer } @@ -1317,21 +1595,28 @@ export interface RankEvalRankEvalMetric { } export interface RankEvalRankEvalMetricBase { + /** Sets the maximum number of documents retrieved per query. This value will act in place of the usual size parameter in the query. */ k?: integer } export interface RankEvalRankEvalMetricDetail { + /** The metric_score in the details section shows the contribution of this query to the global quality metric score */ metric_score: double + /** The unrated_docs section contains an _index and _id entry for each document in the search result for this query that didn’t have a ratings value. This can be used to ask the user to supply ratings for these documents */ unrated_docs: RankEvalUnratedDocument[] + /** The hits section shows a grouping of the search results with their supplied ratings */ hits: RankEvalRankEvalHitItem[] + /** The metric_details give additional information about the calculated quality metric (e.g. how many of the retrieved documents were relevant). The content varies for each metric but allows for better interpretation of the results */ metric_details: Record> } export interface RankEvalRankEvalMetricDiscountedCumulativeGain extends RankEvalRankEvalMetricBase { + /** If set to true, this metric will calculate the Normalized DCG. */ normalize?: boolean } export interface RankEvalRankEvalMetricExpectedReciprocalRank extends RankEvalRankEvalMetricBase { + /** The highest relevance grade used in the user-supplied relevance judgments. */ maximum_relevance: integer } @@ -1339,10 +1624,12 @@ export interface RankEvalRankEvalMetricMeanReciprocalRank extends RankEvalRankEv } export interface RankEvalRankEvalMetricPrecision extends RankEvalRankEvalMetricRatingTreshold { + /** Controls how unlabeled documents in the search results are counted. If set to true, unlabeled documents are ignored and neither count as relevant or irrelevant. Set to false (the default), they are treated as irrelevant. 
*/ ignore_unlabeled?: boolean } export interface RankEvalRankEvalMetricRatingTreshold extends RankEvalRankEvalMetricBase { + /** Sets the rating threshold above which documents are considered to be "relevant". */ relevant_rating_threshold?: integer } @@ -1355,15 +1642,22 @@ export interface RankEvalRankEvalQuery { } export interface RankEvalRankEvalRequestItem { + /** The search request’s ID, used to group result details later. */ id: Id + /** The query being evaluated. */ request?: RankEvalRankEvalQuery | QueryDslQueryContainer + /** List of document ratings */ ratings: RankEvalDocumentRating[] + /** The search template Id */ template_id?: Id + /** The search template parameters. */ params?: Record } export interface RankEvalRequest extends RequestBase { -/** A comma-separated list of data streams, indices, and index aliases used to limit the request. Wildcard (`*`) expressions are supported. To target all data streams and indices in a cluster, omit this parameter or use `_all` or `*`. */ + /** A comma-separated list of data streams, indices, and index aliases used to limit the request. + * Wildcard (`*`) expressions are supported. + * To target all data streams and indices in a cluster, omit this parameter or use `_all` or `*`. */ index?: Indices /** If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. */ allow_no_indices?: boolean @@ -1384,7 +1678,9 @@ export interface RankEvalRequest extends RequestBase { } export interface RankEvalResponse { + /** The overall evaluation quality calculated by the defined metric */ metric_score: double + /** The details section contains one entry for every query in the original requests section, keyed by the search request id */ details: Record failures: Record } @@ -1395,34 +1691,66 @@ export interface RankEvalUnratedDocument { } export interface ReindexDestination { + /** The name of the data stream, index, or index alias you are copying to. */ index: IndexName + /** If it is `create`, the operation will only index documents that do not already exist (also known as "put if absent"). + * + * IMPORTANT: To reindex to a data stream destination, this argument must be `create`. */ op_type?: OpType + /** The name of the pipeline to use. */ pipeline?: string + /** By default, a document's routing is preserved unless it's changed by the script. + * If it is `keep`, the routing on the bulk request sent for each match is set to the routing on the match. + * If it is `discard`, the routing on the bulk request sent for each match is set to `null`. + * If it is `=value`, the routing on the bulk request sent for each match is set to all value specified after the equals sign (`=`). */ routing?: Routing + /** The versioning to use for the indexing operation. */ version_type?: VersionType } export interface ReindexRemoteSource { + /** The remote connection timeout. */ connect_timeout?: Duration + /** An object containing the headers of the request. */ headers?: Record + /** The URL for the remote instance of Elasticsearch that you want to index from. + * This information is required when you're indexing from remote. */ host: Host + /** The username to use for authentication with the remote host. */ username?: Username + /** The password to use for authentication with the remote host. 
*/ password?: Password + /** The remote socket read timeout. */ socket_timeout?: Duration } export interface ReindexRequest extends RequestBase { -/** If `true`, the request refreshes affected shards to make this operation visible to search. */ + /** If `true`, the request refreshes affected shards to make this operation visible to search. */ refresh?: boolean - /** The throttle for this request in sub-requests per second. By default, there is no throttle. */ + /** The throttle for this request in sub-requests per second. + * By default, there is no throttle. */ requests_per_second?: float /** The period of time that a consistent view of the index should be maintained for scrolled search. */ scroll?: Duration - /** The number of slices this task should be divided into. It defaults to one slice, which means the task isn't sliced into subtasks. Reindex supports sliced scroll to parallelize the reindexing process. This parallelization can improve efficiency and provide a convenient way to break the request down into smaller parts. NOTE: Reindexing from remote clusters does not support manual or automatic slicing. If set to `auto`, Elasticsearch chooses the number of slices to use. This setting will use one slice per shard, up to a certain limit. If there are multiple sources, it will choose the number of slices based on the index or backing index with the smallest number of shards. */ + /** The number of slices this task should be divided into. + * It defaults to one slice, which means the task isn't sliced into subtasks. + * + * Reindex supports sliced scroll to parallelize the reindexing process. + * This parallelization can improve efficiency and provide a convenient way to break the request down into smaller parts. + * + * NOTE: Reindexing from remote clusters does not support manual or automatic slicing. + * + * If set to `auto`, Elasticsearch chooses the number of slices to use. + * This setting will use one slice per shard, up to a certain limit. + * If there are multiple sources, it will choose the number of slices based on the index or backing index with the smallest number of shards. */ slices?: Slices - /** The period each indexing waits for automatic index creation, dynamic mapping updates, and waiting for active shards. By default, Elasticsearch waits for at least one minute before failing. The actual wait time could be longer, particularly when multiple waits occur. */ + /** The period each indexing waits for automatic index creation, dynamic mapping updates, and waiting for active shards. + * By default, Elasticsearch waits for at least one minute before failing. + * The actual wait time could be longer, particularly when multiple waits occur. */ timeout?: Duration - /** The number of shard copies that must be active before proceeding with the operation. Set it to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). The default value is one, which means it waits for each primary shard to be active. */ + /** The number of shard copies that must be active before proceeding with the operation. + * Set it to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). + * The default value is one, which means it waits for each primary shard to be active. */ wait_for_active_shards?: WaitForActiveShards /** If `true`, the request blocks until the operation is complete. 
*/ wait_for_completion?: boolean @@ -1432,10 +1760,14 @@ export interface ReindexRequest extends RequestBase { conflicts?: Conflicts /** The destination you are copying to. */ dest: ReindexDestination - /** The maximum number of documents to reindex. By default, all documents are reindexed. If it is a value less then or equal to `scroll_size`, a scroll will not be used to retrieve the results for the operation. If `conflicts` is set to `proceed`, the reindex operation could attempt to reindex more documents from the source than `max_docs` until it has successfully indexed `max_docs` documents into the target or it has gone through every document in the source query. */ + /** The maximum number of documents to reindex. + * By default, all documents are reindexed. + * If it is a value less then or equal to `scroll_size`, a scroll will not be used to retrieve the results for the operation. + * + * If `conflicts` is set to `proceed`, the reindex operation could attempt to reindex more documents from the source than `max_docs` until it has successfully indexed `max_docs` documents into the target or it has gone through every document in the source query. */ max_docs?: long /** The script to run to update the document source or metadata when reindexing. */ - script?: Script | string + script?: Script | ScriptSource size?: long /** The source you are copying from. */ source: ReindexSource @@ -1446,31 +1778,65 @@ export interface ReindexRequest extends RequestBase { } export interface ReindexResponse { + /** The number of scroll responses that were pulled back by the reindex. */ batches?: long + /** The number of documents that were successfully created. */ created?: long + /** The number of documents that were successfully deleted. */ deleted?: long + /** If there were any unrecoverable errors during the process, it is an array of those failures. + * If this array is not empty, the request ended because of those failures. + * Reindex is implemented using batches and any failure causes the entire process to end but all failures in the current batch are collected into the array. + * You can use the `conflicts` option to prevent the reindex from ending on version conflicts. */ failures?: BulkIndexByScrollFailure[] + /** The number of documents that were ignored because the script used for the reindex returned a `noop` value for `ctx.op`. */ noops?: long + /** The number of retries attempted by reindex. */ retries?: Retries + /** The number of requests per second effectively run during the reindex. */ requests_per_second?: float slice_id?: integer task?: TaskId + /** The number of milliseconds the request slept to conform to `requests_per_second`. */ throttled_millis?: EpochTime + /** This field should always be equal to zero in a reindex response. + * It has meaning only when using the task API, where it indicates the next time (in milliseconds since epoch) that a throttled request will be run again in order to conform to `requests_per_second`. */ throttled_until_millis?: EpochTime + /** If any of the requests that ran during the reindex timed out, it is `true`. */ timed_out?: boolean + /** The total milliseconds the entire operation took. */ took?: DurationValue + /** The number of documents that were successfully processed. */ total?: long + /** The number of documents that were successfully updated. + * That is to say, a document with the same ID already existed before the reindex updated it. */ updated?: long + /** The number of version conflicts that occurred. 
*/ version_conflicts?: long } export interface ReindexSource { + /** The name of the data stream, index, or alias you are copying from. + * It accepts a comma-separated list to reindex from multiple sources. */ index: Indices + /** The documents to reindex, which is defined with Query DSL. */ query?: QueryDslQueryContainer + /** A remote instance of Elasticsearch that you want to index from. */ remote?: ReindexRemoteSource + /** The number of documents to index per batch. + * Use it when you are indexing from remote to ensure that the batches fit within the on-heap buffer, which defaults to a maximum size of 100 MB. */ size?: integer + /** Slice the reindex request manually using the provided slice ID and total number of slices. */ slice?: SlicedScroll + /** A comma-separated list of `:` pairs to sort by before indexing. + * Use it in conjunction with `max_docs` to control what documents are reindexed. + * + * WARNING: Sort in reindex is deprecated. + * Sorting in reindex was never guaranteed to index documents in order and prevents further development of reindex such as resilience and performance improvements. + * If used in combination with `max_docs`, consider using a query filter instead. */ sort?: Sort + /** If `true`, reindex all source fields. + * Set it to a list to reindex select fields. */ _source?: Fields runtime_mappings?: MappingRuntimeFields } @@ -1480,18 +1846,30 @@ export interface ReindexRethrottleReindexNode extends SpecUtilsBaseNode { } export interface ReindexRethrottleReindexStatus { + /** The number of scroll responses pulled back by the reindex. */ batches: long + /** The number of documents that were successfully created. */ created: long + /** The number of documents that were successfully deleted. */ deleted: long + /** The number of documents that were ignored because the script used for the reindex returned a `noop` value for `ctx.op`. */ noops: long + /** The number of requests per second effectively executed during the reindex. */ requests_per_second: float + /** The number of retries attempted by reindex. `bulk` is the number of bulk actions retried and `search` is the number of search actions retried. */ retries: Retries throttled?: Duration + /** Number of milliseconds the request slept to conform to `requests_per_second`. */ throttled_millis: DurationValue throttled_until?: Duration + /** This field should always be equal to zero in a `_reindex` response. + * It only has meaning when using the Task API, where it indicates the next time (in milliseconds since epoch) a throttled request will be executed again in order to conform to `requests_per_second`. */ throttled_until_millis: DurationValue + /** The number of documents that were successfully processed. */ total: long + /** The number of documents that were successfully updated, for example, a document with same ID already existed prior to reindex updating it. */ updated: long + /** The number of version conflicts that reindex hits. */ version_conflicts: long } @@ -1509,9 +1887,10 @@ export interface ReindexRethrottleReindexTask { } export interface ReindexRethrottleRequest extends RequestBase { -/** The task identifier, which can be found by using the tasks API. */ + /** The task identifier, which can be found by using the tasks API. */ task_id: Id - /** The throttle for this request in sub-requests per second. It can be either `-1` to turn off throttling or any decimal number like `1.7` or `12` to throttle to that level. */ + /** The throttle for this request in sub-requests per second. 
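A hedged sketch of a reindex call using the `ReindexSource`, `ReindexDestination`, and request parameters typed above, reusing the illustrative `client`; the index names, query, and Painless script are assumptions:

```ts
// Copy matching documents into a new index, skipping version conflicts.
const reindexTask = await client.reindex({
  source: { index: 'old-index', query: { range: { views: { gte: 1 } } } },
  dest: { index: 'new-index', op_type: 'create' },
  conflicts: 'proceed',
  script: { lang: 'painless', source: 'ctx._source.migrated = true' },
  wait_for_completion: false // returns a task ID to poll via the tasks API
})

console.log(reindexTask.task)
```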
+ * It can be either `-1` to turn off throttling or any decimal number like `1.7` or `12` to throttle to that level. */ requests_per_second?: float /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { task_id?: never, requests_per_second?: never } @@ -1524,13 +1903,19 @@ export interface ReindexRethrottleResponse { } export interface RenderSearchTemplateRequest extends RequestBase { -/** The ID of the search template to render. If no `source` is specified, this or the `id` request body parameter is required. */ + /** The ID of the search template to render. + * If no `source` is specified, this or the `id` request body parameter is required. */ id?: Id file?: string - /** Key-value pairs used to replace Mustache variables in the template. The key is the variable name. The value is the variable value. */ + /** Key-value pairs used to replace Mustache variables in the template. + * The key is the variable name. + * The value is the variable value. */ params?: Record - /** An inline search template. It supports the same parameters as the search API's request body. These parameters also support Mustache variables. If no `id` or `` is specified, this parameter is required. */ - source?: string + /** An inline search template. + * It supports the same parameters as the search API's request body. + * These parameters also support Mustache variables. + * If no `id` or `` is specified, this parameter is required. */ + source?: ScriptSource /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { id?: never, file?: never, params?: never, source?: never } /** All values in `querystring` will be added to the request querystring. */ @@ -1544,18 +1929,29 @@ export interface RenderSearchTemplateResponse { export type ScriptsPainlessExecutePainlessContext = 'painless_test' | 'filter' | 'score' | 'boolean_field' | 'date_field' | 'double_field' | 'geo_point_field' | 'ip_field' | 'keyword_field' | 'long_field' | 'composite_field' export interface ScriptsPainlessExecutePainlessContextSetup { + /** Document that's temporarily indexed in-memory and accessible from the script. */ document: any + /** Index containing a mapping that's compatible with the indexed document. + * You may specify a remote index by prefixing the index with the remote cluster alias. + * For example, `remote1:my_index` indicates that you want to run the painless script against the "my_index" index on the "remote1" cluster. + * This request will be forwarded to the "remote1" cluster if you have configured a connection to that remote cluster. + * + * NOTE: Wildcards are not accepted in the index expression for this endpoint. + * The expression `*:myindex` will return the error "No such remote cluster" and the expression `logs*` or `remote1:logs*` will return the error "index not found". */ index: IndexName + /** Use this parameter to specify a query for computing a score. */ query?: QueryDslQueryContainer } export interface ScriptsPainlessExecuteRequest extends RequestBase { -/** The context that the script should run in. NOTE: Result ordering in the field contexts is not guaranteed. */ + /** The context that the script should run in. + * NOTE: Result ordering in the field contexts is not guaranteed. */ context?: ScriptsPainlessExecutePainlessContext - /** Additional parameters for the `context`. NOTE: This parameter is required for all contexts except `painless_test`, which is the default if no value is provided for `context`. 
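A minimal sketch of the render search template API typed above, reusing the illustrative `client`; the inline Mustache source and params are assumptions:

```ts
// Render an inline Mustache template without running the search.
const rendered = await client.renderSearchTemplate({
  source: '{ "query": { "match": { "{{field}}": "{{value}}" } } }',
  params: { field: 'title', value: 'hello' }
})

console.log(rendered.template_output)
```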
*/ + /** Additional parameters for the `context`. + * NOTE: This parameter is required for all contexts except `painless_test`, which is the default if no value is provided for `context`. */ context_setup?: ScriptsPainlessExecutePainlessContextSetup /** The Painless script to run. */ - script?: Script | string + script?: Script | ScriptSource /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { context?: never, context_setup?: never, script?: never } /** All values in `querystring` will be added to the request querystring. */ @@ -1567,7 +1963,7 @@ export interface ScriptsPainlessExecuteResponse { } export interface ScrollRequest extends RequestBase { -/** The scroll ID */ + /** The scroll ID */ scroll_id?: ScrollId /** If true, the API response’s hit.total property is returned as an integer. If false, the API response’s hit.total property is returned as an object. */ rest_total_hits_as_int?: boolean @@ -1582,72 +1978,124 @@ export interface ScrollRequest extends RequestBase { export type ScrollResponse> = SearchResponseBody export interface SearchRequest extends RequestBase { -/** A comma-separated list of data streams, indices, and aliases to search. It supports wildcards (`*`). To search all data streams and indices, omit this parameter or use `*` or `_all`. */ + /** A comma-separated list of data streams, indices, and aliases to search. + * It supports wildcards (`*`). + * To search all data streams and indices, omit this parameter or use `*` or `_all`. */ index?: Indices - /** If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. */ + /** If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. + * This behavior applies even if the request targets other open indices. + * For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. */ allow_no_indices?: boolean - /** If `true` and there are shard request timeouts or shard failures, the request returns partial results. If `false`, it returns an error with no partial results. To override the default behavior, you can set the `search.default_allow_partial_results` cluster setting to `false`. */ + /** If `true` and there are shard request timeouts or shard failures, the request returns partial results. + * If `false`, it returns an error with no partial results. + * + * To override the default behavior, you can set the `search.default_allow_partial_results` cluster setting to `false`. */ allow_partial_search_results?: boolean - /** The analyzer to use for the query string. This parameter can be used only when the `q` query string parameter is specified. */ + /** The analyzer to use for the query string. + * This parameter can be used only when the `q` query string parameter is specified. */ analyzer?: string - /** If `true`, wildcard and prefix queries are analyzed. This parameter can be used only when the `q` query string parameter is specified. */ + /** If `true`, wildcard and prefix queries are analyzed. + * This parameter can be used only when the `q` query string parameter is specified. 
*/ analyze_wildcard?: boolean - /** The number of shard results that should be reduced at once on the coordinating node. If the potential number of shards in the request can be large, this value should be used as a protection mechanism to reduce the memory overhead per search request. */ + /** The number of shard results that should be reduced at once on the coordinating node. + * If the potential number of shards in the request can be large, this value should be used as a protection mechanism to reduce the memory overhead per search request. */ batched_reduce_size?: long /** If `true`, network round-trips between the coordinating node and the remote clusters are minimized when running cross-cluster search (CCS) requests. */ ccs_minimize_roundtrips?: boolean - /** The default operator for the query string query: `AND` or `OR`. This parameter can be used only when the `q` query string parameter is specified. */ + /** The default operator for the query string query: `AND` or `OR`. + * This parameter can be used only when the `q` query string parameter is specified. */ default_operator?: QueryDslOperator - /** The field to use as a default when no field prefix is given in the query string. This parameter can be used only when the `q` query string parameter is specified. */ + /** The field to use as a default when no field prefix is given in the query string. + * This parameter can be used only when the `q` query string parameter is specified. */ df?: string - /** The type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. It supports comma-separated values such as `open,hidden`. */ + /** The type of index that wildcard patterns can match. + * If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. + * It supports comma-separated values such as `open,hidden`. */ expand_wildcards?: ExpandWildcards /** If `true`, concrete, expanded or aliased indices will be ignored when frozen. */ ignore_throttled?: boolean /** If `false`, the request returns an error if it targets a missing or closed index. */ ignore_unavailable?: boolean - /** If `true`, the response includes the score contribution from any named queries. This functionality reruns each named query on every hit in a search response. Typically, this adds a small overhead to a request. However, using computationally expensive named queries on a large number of hits may add significant overhead. */ + /** If `true`, the response includes the score contribution from any named queries. + * + * This functionality reruns each named query on every hit in a search response. + * Typically, this adds a small overhead to a request. + * However, using computationally expensive named queries on a large number of hits may add significant overhead. */ include_named_queries_score?: boolean - /** If `true`, format-based query failures (such as providing text to a numeric field) in the query string will be ignored. This parameter can be used only when the `q` query string parameter is specified. */ + /** If `true`, format-based query failures (such as providing text to a numeric field) in the query string will be ignored. + * This parameter can be used only when the `q` query string parameter is specified. */ lenient?: boolean - /** The number of concurrent shard requests per node that the search runs concurrently. 
This value should be used to limit the impact of the search on the cluster in order to limit the number of concurrent shard requests. */ - max_concurrent_shard_requests?: long - /** The nodes and shards used for the search. By default, Elasticsearch selects from eligible nodes and shards using adaptive replica selection, accounting for allocation awareness. Valid values are: * `_only_local` to run the search only on shards on the local node. * `_local` to, if possible, run the search on shards on the local node, or if not, select shards using the default method. * `_only_nodes:,` to run the search on only the specified nodes IDs. If suitable shards exist on more than one selected node, use shards on those nodes using the default method. If none of the specified nodes are available, select shards from any available node using the default method. * `_prefer_nodes:,` to if possible, run the search on the specified nodes IDs. If not, select shards using the default method. `_shards:,` to run the search only on the specified shards. You can combine this value with other `preference` values. However, the `_shards` value must come first. For example: `_shards:2,3|_local`. `` (any string that does not start with `_`) to route searches with the same `` to the same shards in the same order. */ + /** The number of concurrent shard requests per node that the search runs concurrently. + * This value should be used to limit the impact of the search on the cluster in order to limit the number of concurrent shard requests. */ + max_concurrent_shard_requests?: integer + /** The nodes and shards used for the search. + * By default, Elasticsearch selects from eligible nodes and shards using adaptive replica selection, accounting for allocation awareness. + * Valid values are: + * + * * `_only_local` to run the search only on shards on the local node. + * * `_local` to, if possible, run the search on shards on the local node, or if not, select shards using the default method. + * * `_only_nodes:,` to run the search on only the specified nodes IDs. If suitable shards exist on more than one selected node, use shards on those nodes using the default method. If none of the specified nodes are available, select shards from any available node using the default method. + * * `_prefer_nodes:,` to if possible, run the search on the specified nodes IDs. If not, select shards using the default method. + * `_shards:,` to run the search only on the specified shards. You can combine this value with other `preference` values. However, the `_shards` value must come first. For example: `_shards:2,3|_local`. + * `` (any string that does not start with `_`) to route searches with the same `` to the same shards in the same order. */ preference?: string - /** A threshold that enforces a pre-filter roundtrip to prefilter search shards based on query rewriting if the number of shards the search request expands to exceeds the threshold. This filter roundtrip can limit the number of shards significantly if for instance a shard can not match any documents based on its rewrite method (if date filters are mandatory to match but the shard bounds and the query are disjoint). When unspecified, the pre-filter phase is executed if any of these conditions is met: * The request targets more than 128 shards. * The request targets one or more read-only index. * The primary sort of the query targets an indexed field. 
*/ + /** A threshold that enforces a pre-filter roundtrip to prefilter search shards based on query rewriting if the number of shards the search request expands to exceeds the threshold. + * This filter roundtrip can limit the number of shards significantly if for instance a shard can not match any documents based on its rewrite method (if date filters are mandatory to match but the shard bounds and the query are disjoint). + * When unspecified, the pre-filter phase is executed if any of these conditions is met: + * + * * The request targets more than 128 shards. + * * The request targets one or more read-only index. + * * The primary sort of the query targets an indexed field. */ pre_filter_shard_size?: long - /** If `true`, the caching of search results is enabled for requests where `size` is `0`. It defaults to index level settings. */ + /** If `true`, the caching of search results is enabled for requests where `size` is `0`. + * It defaults to index level settings. */ request_cache?: boolean /** A custom value that is used to route operations to a specific shard. */ routing?: Routing - /** The period to retain the search context for scrolling. By default, this value cannot exceed `1d` (24 hours). You can change this limit by using the `search.max_keep_alive` cluster-level setting. */ + /** The period to retain the search context for scrolling. + * By default, this value cannot exceed `1d` (24 hours). + * You can change this limit by using the `search.max_keep_alive` cluster-level setting. */ scroll?: Duration /** Indicates how distributed term frequencies are calculated for relevance scoring. */ search_type?: SearchType /** The field to use for suggestions. */ suggest_field?: Field - /** The suggest mode. This parameter can be used only when the `suggest_field` and `suggest_text` query string parameters are specified. */ + /** The suggest mode. + * This parameter can be used only when the `suggest_field` and `suggest_text` query string parameters are specified. */ suggest_mode?: SuggestMode - /** The number of suggestions to return. This parameter can be used only when the `suggest_field` and `suggest_text` query string parameters are specified. */ + /** The number of suggestions to return. + * This parameter can be used only when the `suggest_field` and `suggest_text` query string parameters are specified. */ suggest_size?: long - /** The source text for which the suggestions should be returned. This parameter can be used only when the `suggest_field` and `suggest_text` query string parameters are specified. */ + /** The source text for which the suggestions should be returned. + * This parameter can be used only when the `suggest_field` and `suggest_text` query string parameters are specified. */ suggest_text?: string /** If `true`, aggregation and suggester names are be prefixed by their respective types in the response. */ typed_keys?: boolean /** Indicates whether `hits.total` should be rendered as an integer or an object in the rest search response. */ rest_total_hits_as_int?: boolean - /** A comma-separated list of source fields to exclude from the response. You can also use this parameter to exclude fields from the subset specified in `_source_includes` query parameter. If the `_source` parameter is `false`, this parameter is ignored. */ + /** A comma-separated list of source fields to exclude from the response. + * You can also use this parameter to exclude fields from the subset specified in `_source_includes` query parameter. 
+ * If the `_source` parameter is `false`, this parameter is ignored. */ _source_excludes?: Fields - /** A comma-separated list of source fields to include in the response. If this parameter is specified, only these source fields are returned. You can exclude fields from this subset using the `_source_excludes` query parameter. If the `_source` parameter is `false`, this parameter is ignored. */ + /** A comma-separated list of source fields to include in the response. + * If this parameter is specified, only these source fields are returned. + * You can exclude fields from this subset using the `_source_excludes` query parameter. + * If the `_source` parameter is `false`, this parameter is ignored. */ _source_includes?: Fields - /** A query in the Lucene query string syntax. Query parameter searches do not support the full Elasticsearch Query DSL but are handy for testing. IMPORTANT: This parameter overrides the query parameter in the request body. If both parameters are specified, documents matching the query request body parameter are not returned. */ + /** A query in the Lucene query string syntax. + * Query parameter searches do not support the full Elasticsearch Query DSL but are handy for testing. + * + * IMPORTANT: This parameter overrides the query parameter in the request body. + * If both parameters are specified, documents matching the query request body parameter are not returned. */ q?: string - /** Should this request force synthetic _source? Use this to test if the mapping supports synthetic _source and to get a sense of the worst case performance. Fetches with this enabled will be slower the enabling synthetic source natively in the index. */ + /** Should this request force synthetic _source? + * Use this to test if the mapping supports synthetic _source and to get a sense of the worst case performance. + * Fetches with this enabled will be slower than enabling synthetic source natively in the index. */ force_synthetic_source?: boolean /** Defines the aggregations that are run as part of the search request. */ aggregations?: Record - /** @alias aggregations */ - /** Defines the aggregations that are run as part of the search request. */ + /** Defines the aggregations that are run as part of the search request. + * @alias aggregations */ aggs?: Record /** Collapses search results by the values of the specified field. */ collapse?: SearchFieldCollapse @@ -1655,51 +2103,82 @@ export interface SearchRequest extends RequestBase { explain?: boolean /** Configuration of search extensions defined by Elasticsearch plugins. */ ext?: Record - /** The starting document offset, which must be non-negative. By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters. To page through more hits, use the `search_after` parameter. */ + /** The starting document offset, which must be non-negative. + * By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters. + * To page through more hits, use the `search_after` parameter. */ from?: integer /** Specifies the highlighter to use for retrieving highlighted snippets from one or more fields in your search results. */ highlight?: SearchHighlight - /** Number of hits matching the query to count accurately. If `true`, the exact number of hits is returned at the cost of some performance. If `false`, the response does not include the total number of hits matching the query. */ + /** Number of hits matching the query to count accurately.
+ * If `true`, the exact number of hits is returned at the cost of some performance. + * If `false`, the response does not include the total number of hits matching the query. */ track_total_hits?: SearchTrackHits - /** Boost the `_score` of documents from specified indices. The boost value is the factor by which scores are multiplied. A boost value greater than `1.0` increases the score. A boost value between `0` and `1.0` decreases the score. */ - indices_boost?: Record[] - /** An array of wildcard (`*`) field patterns. The request returns doc values for field names matching these patterns in the `hits.fields` property of the response. */ + /** Boost the `_score` of documents from specified indices. + * The boost value is the factor by which scores are multiplied. + * A boost value greater than `1.0` increases the score. + * A boost value between `0` and `1.0` decreases the score. */ + indices_boost?: Partial>[] + /** An array of wildcard (`*`) field patterns. + * The request returns doc values for field names matching these patterns in the `hits.fields` property of the response. */ docvalue_fields?: (QueryDslFieldAndFormat | Field)[] /** The approximate kNN search to run. */ knn?: KnnSearch | KnnSearch[] - /** The Reciprocal Rank Fusion (RRF) to use. */ + /** The Reciprocal Rank Fusion (RRF) to use. + * @remarks This property is not supported on Elastic Cloud Serverless. */ rank?: RankContainer - /** The minimum `_score` for matching documents. Documents with a lower `_score` are not included in the search results. */ + /** The minimum `_score` for matching documents. + * Documents with a lower `_score` are not included in search results and results collected by aggregations. */ min_score?: double - /** Use the `post_filter` parameter to filter search results. The search hits are filtered after the aggregations are calculated. A post filter has no impact on the aggregation results. */ + /** Use the `post_filter` parameter to filter search results. + * The search hits are filtered after the aggregations are calculated. + * A post filter has no impact on the aggregation results. */ post_filter?: QueryDslQueryContainer - /** Set to `true` to return detailed timing information about the execution of individual components in a search request. NOTE: This is a debugging tool and adds significant overhead to search execution. */ + /** Set to `true` to return detailed timing information about the execution of individual components in a search request. + * NOTE: This is a debugging tool and adds significant overhead to search execution. */ profile?: boolean /** The search definition using the Query DSL. */ query?: QueryDslQueryContainer /** Can be used to improve precision by reordering just the top (for example 100 - 500) documents returned by the `query` and `post_filter` phases. */ rescore?: SearchRescore | SearchRescore[] - /** A retriever is a specification to describe top documents returned from a search. A retriever replaces other elements of the search API that also return top documents such as `query` and `knn`. */ + /** A retriever is a specification to describe top documents returned from a search. + * A retriever replaces other elements of the search API that also return top documents such as `query` and `knn`. */ retriever?: RetrieverContainer /** Retrieve a script evaluation (based on different fields) for each hit. */ script_fields?: Record /** Used to retrieve the next page of hits using a set of sort values from the previous page. 
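A minimal sketch of `search_after` pagination with the JS client, reusing the `client` instance from the earlier sketch; the index name and the `@timestamp` field are assumptions:

// Page 1: sort on a field plus `_doc` as a tiebreaker (assumed field names).
const page1 = await client.search({
  index: 'my-index',
  size: 100,
  sort: [{ '@timestamp': 'asc' }, { _doc: 'asc' }]
})
const lastHit = page1.hits.hits.at(-1)

// Page 2: pass the previous page's last sort values via `search_after`.
if (lastHit?.sort) {
  const page2 = await client.search({
    index: 'my-index',
    size: 100,
    sort: [{ '@timestamp': 'asc' }, { _doc: 'asc' }],
    search_after: lastHit.sort
  })
  console.log(page2.hits.hits.length)
}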
*/ search_after?: SortResults - /** The number of hits to return, which must not be negative. By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters. To page through more hits, use the `search_after` property. */ + /** The number of hits to return, which must not be negative. + * By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters. + * To page through more hits, use the `search_after` property. */ size?: integer /** Split a scrolled search into multiple slices that can be consumed independently. */ slice?: SlicedScroll /** A comma-separated list of : pairs. */ sort?: Sort - /** The source fields that are returned for matching documents. These fields are returned in the `hits._source` property of the search response. If the `stored_fields` property is specified, the `_source` property defaults to `false`. Otherwise, it defaults to `true`. */ + /** The source fields that are returned for matching documents. + * These fields are returned in the `hits._source` property of the search response. + * If the `stored_fields` property is specified, the `_source` property defaults to `false`. + * Otherwise, it defaults to `true`. */ _source?: SearchSourceConfig - /** An array of wildcard (`*`) field patterns. The request returns values for field names matching these patterns in the `hits.fields` property of the response. */ + /** An array of wildcard (`*`) field patterns. + * The request returns values for field names matching these patterns in the `hits.fields` property of the response. */ fields?: (QueryDslFieldAndFormat | Field)[] /** Defines a suggester that provides similar looking terms based on a provided text. */ suggest?: SearchSuggester - /** The maximum number of documents to collect for each shard. If a query reaches this limit, Elasticsearch terminates the query early. Elasticsearch collects documents before sorting. IMPORTANT: Use with caution. Elasticsearch applies this property to each shard handling the request. When possible, let Elasticsearch perform early termination automatically. Avoid specifying this property for requests that target data streams with backing indices across multiple data tiers. If set to `0` (default), the query does not terminate early. */ + /** The maximum number of documents to collect for each shard. + * If a query reaches this limit, Elasticsearch terminates the query early. + * Elasticsearch collects documents before sorting. + * + * IMPORTANT: Use with caution. + * Elasticsearch applies this property to each shard handling the request. + * When possible, let Elasticsearch perform early termination automatically. + * Avoid specifying this property for requests that target data streams with backing indices across multiple data tiers. + * + * If set to `0` (default), the query does not terminate early. */ terminate_after?: long - /** The period of time to wait for a response from each shard. If no response is received before the timeout expires, the request fails and returns an error. Defaults to no timeout. */ + /** The period of time to wait for a response from each shard. + * If no response is received before the timeout expires, the request fails and returns an error. + * Defaults to no timeout. */ timeout?: string /** If `true`, calculate and return document scores, even if the scores are not used for sorting. 
*/ track_scores?: boolean @@ -1707,13 +2186,20 @@ export interface SearchRequest extends RequestBase { version?: boolean /** If `true`, the request returns sequence number and primary term of the last modification of each hit. */ seq_no_primary_term?: boolean - /** A comma-separated list of stored fields to return as part of a hit. If no fields are specified, no stored fields are included in the response. If this field is specified, the `_source` property defaults to `false`. You can pass `_source: true` to return both source fields and stored fields in the search response. */ + /** A comma-separated list of stored fields to return as part of a hit. + * If no fields are specified, no stored fields are included in the response. + * If this field is specified, the `_source` property defaults to `false`. + * You can pass `_source: true` to return both source fields and stored fields in the search response. */ stored_fields?: Fields - /** Limit the search to a point in time (PIT). If you provide a PIT, you cannot specify an `` in the request path. */ + /** Limit the search to a point in time (PIT). + * If you provide a PIT, you cannot specify an `` in the request path. */ pit?: SearchPointInTimeReference - /** One or more runtime fields in the search request. These fields take precedence over mapped fields with the same name. */ + /** One or more runtime fields in the search request. + * These fields take precedence over mapped fields with the same name. */ runtime_mappings?: MappingRuntimeFields - /** The stats groups to associate with the search. Each group maintains a statistics aggregation for its associated searches. You can retrieve these stats using the indices stats API. */ + /** The stats groups to associate with the search. + * Each group maintains a statistics aggregation for its associated searches. + * You can retrieve these stats using the indices stats API. */ stats?: string[] /** All values in `body` will be added to the request body. 
*/ body?: string | { [key: string]: any } & { index?: never, allow_no_indices?: never, allow_partial_search_results?: never, analyzer?: never, analyze_wildcard?: never, batched_reduce_size?: never, ccs_minimize_roundtrips?: never, default_operator?: never, df?: never, expand_wildcards?: never, ignore_throttled?: never, ignore_unavailable?: never, include_named_queries_score?: never, lenient?: never, max_concurrent_shard_requests?: never, preference?: never, pre_filter_shard_size?: never, request_cache?: never, routing?: never, scroll?: never, search_type?: never, suggest_field?: never, suggest_mode?: never, suggest_size?: never, suggest_text?: never, typed_keys?: never, rest_total_hits_as_int?: never, _source_excludes?: never, _source_includes?: never, q?: never, force_synthetic_source?: never, aggregations?: never, aggs?: never, collapse?: never, explain?: never, ext?: never, from?: never, highlight?: never, track_total_hits?: never, indices_boost?: never, docvalue_fields?: never, knn?: never, rank?: never, min_score?: never, post_filter?: never, profile?: never, query?: never, rescore?: never, retriever?: never, script_fields?: never, search_after?: never, size?: never, slice?: never, sort?: never, _source?: never, fields?: never, suggest?: never, terminate_after?: never, timeout?: never, track_scores?: never, version?: never, seq_no_primary_term?: never, stored_fields?: never, pit?: never, runtime_mappings?: never, stats?: never } @@ -1724,9 +2210,25 @@ export interface SearchRequest extends RequestBase { export type SearchResponse> = SearchResponseBody export interface SearchResponseBody> { + /** The number of milliseconds it took Elasticsearch to run the request. + * This value is calculated by measuring the time elapsed between receipt of a request on the coordinating node and the time at which the coordinating node is ready to send the response. + * It includes: + * + * * Communication time between the coordinating node and data nodes + * * Time the request spends in the search thread pool, queued for execution + * * Actual run time + * + * It does not include: + * + * * Time needed to send the request to Elasticsearch + * * Time needed to serialize the JSON response + * * Time needed to send the response to a client */ took: long + /** If `true`, the request timed out before completion; returned results may be partial or empty. */ timed_out: boolean + /** A count of shards used for the request. */ _shards: ShardStatistics + /** The returned documents and metadata. */ hits: SearchHitsMetadata aggregations?: TAggregations _clusters?: ClusterStatistics @@ -1735,6 +2237,9 @@ export interface SearchResponseBody[]> terminated_early?: boolean @@ -1816,10 +2321,20 @@ export interface SearchCollector { } export interface SearchCompletionContext { + /** The factor by which the score of the suggestion should be boosted. + * The score is computed by multiplying the boost with the suggestion weight. */ boost?: double + /** The value of the category to filter/boost on. */ context: SearchContext + /** An array of precision values at which neighboring geohashes should be taken into account. + * Precision value can be a distance value (`5m`, `10km`, etc.) or a raw geohash precision (`1`..`12`). + * Defaults to generating neighbors for index time precision level. */ neighbours?: GeoHashPrecision[] + /** The precision of the geohash to encode the query geo point. + * Can be specified as a distance value (`5m`, `10km`, etc.), or as a raw geohash precision (`1`..`12`). 
+ * Defaults to index time precision level. */ precision?: GeoHashPrecision + /** Whether the category value should be treated as a prefix or not. */ prefix?: boolean } @@ -1841,9 +2356,13 @@ export interface SearchCompletionSuggestOption { } export interface SearchCompletionSuggester extends SearchSuggesterBase { + /** A value, geo point object, or a geo hash string to filter or boost the suggestion on. */ contexts?: Record + /** Enables fuzziness, meaning you can have a typo in your search and still get results back. */ fuzzy?: SearchSuggestFuzziness + /** A regex query that expresses a prefix as a regular expression. */ regex?: SearchRegexOptions + /** Whether duplicate suggestions should be filtered out. */ skip_duplicates?: boolean } @@ -1883,16 +2402,38 @@ export interface SearchDfsStatisticsProfile { } export interface SearchDirectGenerator { + /** The field to fetch the candidate suggestions from. + * Needs to be set globally or per suggestion. */ field: Field + /** The maximum edit distance candidate suggestions can have in order to be considered as a suggestion. + * Can only be `1` or `2`. */ max_edits?: integer + /** A factor that is used to multiply with the shard_size in order to inspect more candidate spelling corrections on the shard level. + * Can improve accuracy at the cost of performance. */ max_inspections?: float + /** The maximum threshold in number of documents in which a suggest text token can exist in order to be included. + * This can be used to exclude high frequency terms—which are usually spelled correctly—from being spellchecked. + * Can be a relative percentage number (for example `0.4`) or an absolute number to represent document frequencies. + * If a value higher than 1 is specified, then fractional can not be specified. */ max_term_freq?: float + /** The minimal threshold in number of documents a suggestion should appear in. + * This can improve quality by only suggesting high frequency terms. + * Can be specified as an absolute number or as a relative percentage of number of documents. + * If a value higher than 1 is specified, the number cannot be fractional. */ min_doc_freq?: float + /** The minimum length a suggest text term must have in order to be included. */ min_word_length?: integer + /** A filter (analyzer) that is applied to each of the generated tokens before they are passed to the actual phrase scorer. */ post_filter?: string + /** A filter (analyzer) that is applied to each of the tokens passed to this candidate generator. + * This filter is applied to the original token before candidates are generated. */ pre_filter?: string + /** The number of minimal prefix characters that must match in order to be a candidate for suggestions. + * Increasing this number improves spellcheck performance. */ prefix_length?: integer + /** The maximum corrections to be returned per suggest text token. */ size?: integer + /** Controls what suggestions are included on the suggestions generated on each shard. */ suggest_mode?: SuggestMode } @@ -1922,18 +2463,28 @@ export interface SearchFetchProfileDebug { } export interface SearchFieldCollapse { + /** The field to collapse the result set on */ field: Field + /** The number of inner hits and their sort order */ inner_hits?: SearchInnerHits | SearchInnerHits[] + /** The number of concurrent requests allowed to retrieve the inner_hits per group */ max_concurrent_group_searches?: integer collapse?: SearchFieldCollapse } export interface SearchFieldSuggester { + /** Provides auto-complete/search-as-you-type functionality.
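A minimal sketch of the completion suggester described here, reusing the `client` instance from the earlier sketch; the `products` index and its `completion`-mapped `suggest` field are assumptions:

const completionResponse = await client.search({
  index: 'products',
  suggest: {
    product_suggest: {
      prefix: 'lapt',                                   // the text typed so far
      completion: { field: 'suggest', skip_duplicates: true }
    }
  }
})
// Each entry holds the matching suggestions for the given prefix.
console.log(completionResponse.suggest?.product_suggest)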
*/ completion?: SearchCompletionSuggester + /** Provides access to word alternatives on a per token basis within a certain string distance. */ phrase?: SearchPhraseSuggester + /** Suggests terms based on edit distance. */ term?: SearchTermSuggester + /** Prefix used to search for suggestions. */ prefix?: string + /** A prefix expressed as a regular expression. */ regex?: string + /** The text to use as input for the suggester. + * Needs to be set globally or per suggestion. */ text?: string } @@ -1944,25 +2495,61 @@ export interface SearchHighlight extends SearchHighlightBase { export interface SearchHighlightBase { type?: SearchHighlighterType + /** A string that contains each boundary character. */ boundary_chars?: string + /** How far to scan for boundary characters. */ boundary_max_scan?: integer + /** Specifies how to break the highlighted fragments: chars, sentence, or word. + * Only valid for the unified and fvh highlighters. + * Defaults to `sentence` for the `unified` highlighter. Defaults to `chars` for the `fvh` highlighter. */ boundary_scanner?: SearchBoundaryScanner + /** Controls which locale is used to search for sentence and word boundaries. + * This parameter takes a form of a language tag, for example: `"en-US"`, `"fr-FR"`, `"ja-JP"`. */ boundary_scanner_locale?: string force_source?: boolean + /** Specifies how text should be broken up in highlight snippets: `simple` or `span`. + * Only valid for the `plain` highlighter. */ fragmenter?: SearchHighlighterFragmenter + /** The size of the highlighted fragment in characters. */ fragment_size?: integer highlight_filter?: boolean + /** Highlight matches for a query other than the search query. + * This is especially useful if you use a rescore query because those are not taken into account by highlighting by default. */ highlight_query?: QueryDslQueryContainer max_fragment_length?: integer + /** If set to a non-negative value, highlighting stops at this defined maximum limit. + * The rest of the text is not processed, thus not highlighted and no error is returned + * The `max_analyzed_offset` query setting does not override the `index.highlight.max_analyzed_offset` setting, which prevails when it’s set to lower value than the query setting. */ max_analyzed_offset?: integer + /** The amount of text you want to return from the beginning of the field if there are no matching fragments to highlight. */ no_match_size?: integer + /** The maximum number of fragments to return. + * If the number of fragments is set to `0`, no fragments are returned. + * Instead, the entire field contents are highlighted and returned. + * This can be handy when you need to highlight short texts such as a title or address, but fragmentation is not required. + * If `number_of_fragments` is `0`, `fragment_size` is ignored. */ number_of_fragments?: integer options?: Record + /** Sorts highlighted fragments by score when set to `score`. + * By default, fragments will be output in the order they appear in the field (order: `none`). + * Setting this option to `score` will output the most relevant fragments first. + * Each highlighter applies its own logic to compute relevancy scores. */ order?: SearchHighlighterOrder + /** Controls the number of matching phrases in a document that are considered. + * Prevents the `fvh` highlighter from analyzing too many phrases and consuming too much memory. + * When using `matched_fields`, `phrase_limit` phrases per matched field are considered. Raising the limit increases query time and consumes more memory. 
+ * Only supported by the `fvh` highlighter. */ phrase_limit?: integer + /** Use in conjunction with `pre_tags` to define the HTML tags to use for the highlighted text. + * By default, highlighted text is wrapped in `` and `` tags. */ post_tags?: string[] + /** Use in conjunction with `post_tags` to define the HTML tags to use for the highlighted text. + * By default, highlighted text is wrapped in `` and `` tags. */ pre_tags?: string[] + /** By default, only fields that contains a query match are highlighted. + * Set to `false` to highlight all fields. */ require_field_match?: boolean + /** Set to `styled` to use the built-in tag schema. */ tags_schema?: SearchHighlighterTagsSchema } @@ -2005,14 +2592,19 @@ export interface SearchHit { } export interface SearchHitsMetadata { + /** Total hit count information, present only if `track_total_hits` wasn't `false` in the search request. */ total?: SearchTotalHits | long hits: SearchHit[] max_score?: double | null } export interface SearchInnerHits { + /** The name for the particular inner hit definition in the response. + * Useful when a search request contains multiple inner hits. */ name?: Name + /** The maximum number of hits to return per `inner_hits`. */ size?: integer + /** Inner hit starting document offset. */ from?: integer collapse?: SearchFieldCollapse docvalue_fields?: (QueryDslFieldAndFormat | Field)[] @@ -2022,6 +2614,8 @@ export interface SearchInnerHits { script_fields?: Record seq_no_primary_term?: boolean fields?: Fields + /** How the inner hits should be sorted per `inner_hits`. + * By default, inner hits are sorted by score. */ sort?: Sort _source?: SearchSourceConfig stored_fields?: Fields @@ -2075,11 +2669,14 @@ export interface SearchKnnQueryProfileResult { } export interface SearchLaplaceSmoothingModel { + /** A constant that is added to all counts to balance weights. */ alpha: double } export interface SearchLearningToRank { + /** The unique identifier of the trained model uploaded to Elasticsearch */ model_id: string + /** Named parameters to be passed to the query templates used for feature */ params?: Record } @@ -2100,18 +2697,25 @@ export interface SearchPhraseSuggest extends SearchSuggestBase { } export interface SearchPhraseSuggestCollate { + /** Parameters to use if the query is templated. */ params?: Record + /** Returns all suggestions with an extra `collate_match` option indicating whether the generated phrase matched any document. */ prune?: boolean + /** A collate query that is run once for every suggestion. */ query: SearchPhraseSuggestCollateQuery } export interface SearchPhraseSuggestCollateQuery { + /** The search template ID. */ id?: Id - source?: string + /** The query source. */ + source?: ScriptSource } export interface SearchPhraseSuggestHighlight { + /** Use in conjunction with `pre_tag` to define the HTML tags to use for the highlighted text. */ post_tag: string + /** Use in conjunction with `post_tag` to define the HTML tags to use for the highlighted text. */ pre_tag: string } @@ -2123,17 +2727,35 @@ export interface SearchPhraseSuggestOption { } export interface SearchPhraseSuggester extends SearchSuggesterBase { + /** Checks each suggestion against the specified query to prune suggestions for which no matching docs exist in the index. */ collate?: SearchPhraseSuggestCollate + /** Defines a factor applied to the input phrases score, which is used as a threshold for other suggest candidates. + * Only candidates that score higher than the threshold will be included in the result. 
*/ confidence?: double + /** A list of candidate generators that produce a list of possible terms per term in the given text. */ direct_generator?: SearchDirectGenerator[] force_unigrams?: boolean + /** Sets max size of the n-grams (shingles) in the field. + * If the field doesn’t contain n-grams (shingles), this should be omitted or set to `1`. + * If the field uses a shingle filter, the `gram_size` is set to the `max_shingle_size` if not explicitly set. */ gram_size?: integer + /** Sets up suggestion highlighting. + * If not provided, no highlighted field is returned. */ highlight?: SearchPhraseSuggestHighlight + /** The maximum percentage of the terms considered to be misspellings in order to form a correction. + * This method accepts a float value in the range `[0..1)` as a fraction of the actual query terms or a number `>=1` as an absolute number of query terms. */ max_errors?: double + /** The likelihood of a term being misspelled even if the term exists in the dictionary. */ real_word_error_likelihood?: double + /** The separator that is used to separate terms in the bigram field. + * If not set, the whitespace character is used as a separator. */ separator?: string + /** Sets the maximum number of suggested terms to be retrieved from each individual shard. */ shard_size?: integer + /** The smoothing model used to balance weight between infrequent grams (grams (shingles) are not existing in the index) and frequent grams (appear at least once in the index). + * The default model is Stupid Backoff. */ smoothing?: SearchSmoothingModelContainer + /** The text/query to provide suggestions for. */ text?: string token_limit?: integer } @@ -2179,7 +2801,9 @@ export interface SearchQueryProfile { } export interface SearchRegexOptions { + /** Optional operators for the regular expression. */ flags?: integer | string + /** Maximum number of automaton states required for the query. */ max_determinized_states?: integer } @@ -2190,9 +2814,14 @@ export interface SearchRescore { } export interface SearchRescoreQuery { + /** The query to use for rescoring. + * This query is only run on the Top-K results returned by the `query` and `post_filter` phases. */ rescore_query: QueryDslQueryContainer + /** Relative importance of the original query versus the rescore query. */ query_weight?: double + /** Relative importance of the rescore query versus the original query. */ rescore_query_weight?: double + /** Determines how scores are combined. */ score_mode?: SearchScoreMode } @@ -2204,6 +2833,118 @@ export interface SearchSearchProfile { rewrite_time: long } +export interface SearchSearchRequestBody { + /** Defines the aggregations that are run as part of the search request. */ + aggregations?: Record + /** Defines the aggregations that are run as part of the search request. + * @alias aggregations */ + aggs?: Record + /** Collapses search results by the values of the specified field. */ + collapse?: SearchFieldCollapse + /** If `true`, the request returns detailed information about score computation as part of a hit. */ + explain?: boolean + /** Configuration of search extensions defined by Elasticsearch plugins. */ + ext?: Record + /** The starting document offset, which must be non-negative. + * By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters. + * To page through more hits, use the `search_after` parameter. */ + from?: integer + /** Specifies the highlighter to use for retrieving highlighted snippets from one or more fields in your search results.
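A minimal sketch of requesting highlighted snippets, reusing the `client` instance from the earlier sketch; the `articles` index and its text field `body` are assumptions:

const highlightResponse = await client.search({
  index: 'articles',
  query: { match: { body: 'vector tile' } },
  highlight: {
    fields: { body: {} },           // default fragment settings for the assumed `body` field
    pre_tags: ['<em>'],
    post_tags: ['</em>']
  }
})
for (const hit of highlightResponse.hits.hits) {
  console.log(hit.highlight?.body)  // highlighted fragments for this hit, if any
}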
*/ + highlight?: SearchHighlight + /** Number of hits matching the query to count accurately. + * If `true`, the exact number of hits is returned at the cost of some performance. + * If `false`, the response does not include the total number of hits matching the query. */ + track_total_hits?: SearchTrackHits + /** Boost the `_score` of documents from specified indices. + * The boost value is the factor by which scores are multiplied. + * A boost value greater than `1.0` increases the score. + * A boost value between `0` and `1.0` decreases the score. */ + indices_boost?: Partial>[] + /** An array of wildcard (`*`) field patterns. + * The request returns doc values for field names matching these patterns in the `hits.fields` property of the response. */ + docvalue_fields?: (QueryDslFieldAndFormat | Field)[] + /** The approximate kNN search to run. */ + knn?: KnnSearch | KnnSearch[] + /** The Reciprocal Rank Fusion (RRF) to use. + * @remarks This property is not supported on Elastic Cloud Serverless. */ + rank?: RankContainer + /** The minimum `_score` for matching documents. + * Documents with a lower `_score` are not included in search results or results collected by aggregations. */ + min_score?: double + /** Use the `post_filter` parameter to filter search results. + * The search hits are filtered after the aggregations are calculated. + * A post filter has no impact on the aggregation results. */ + post_filter?: QueryDslQueryContainer + /** Set to `true` to return detailed timing information about the execution of individual components in a search request. + * NOTE: This is a debugging tool and adds significant overhead to search execution. */ + profile?: boolean + /** The search definition using the Query DSL. */ + query?: QueryDslQueryContainer + /** Can be used to improve precision by reordering just the top (for example 100 - 500) documents returned by the `query` and `post_filter` phases. */ + rescore?: SearchRescore | SearchRescore[] + /** A retriever is a specification to describe top documents returned from a search. + * A retriever replaces other elements of the search API that also return top documents such as `query` and `knn`. */ + retriever?: RetrieverContainer + /** Retrieve a script evaluation (based on different fields) for each hit. */ + script_fields?: Record + /** Used to retrieve the next page of hits using a set of sort values from the previous page. */ + search_after?: SortResults + /** The number of hits to return, which must not be negative. + * By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters. + * To page through more hits, use the `search_after` property. */ + size?: integer + /** Split a scrolled search into multiple slices that can be consumed independently. */ + slice?: SlicedScroll + /** A comma-separated list of : pairs. */ + sort?: Sort + /** The source fields that are returned for matching documents. + * These fields are returned in the `hits._source` property of the search response. + * If the `stored_fields` property is specified, the `_source` property defaults to `false`. + * Otherwise, it defaults to `true`. */ + _source?: SearchSourceConfig + /** An array of wildcard (`*`) field patterns. + * The request returns values for field names matching these patterns in the `hits.fields` property of the response. */ + fields?: (QueryDslFieldAndFormat | Field)[] + /** Defines a suggester that provides similar looking terms based on a provided text. 
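A minimal sketch of a term suggester used for spell correction, reusing the `client` instance from the earlier sketch; the `articles` index and its `title` field are assumptions:

const suggestResponse = await client.search({
  index: 'articles',
  size: 0,                          // suggestions only, no hits needed
  suggest: {
    spelling: {
      text: 'serach enigne',        // misspelled input text
      term: { field: 'title', suggest_mode: 'popular' }
    }
  }
})
console.log(suggestResponse.suggest?.spelling)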
*/ + suggest?: SearchSuggester + /** The maximum number of documents to collect for each shard. + * If a query reaches this limit, Elasticsearch terminates the query early. + * Elasticsearch collects documents before sorting. + * + * IMPORTANT: Use with caution. + * Elasticsearch applies this property to each shard handling the request. + * When possible, let Elasticsearch perform early termination automatically. + * Avoid specifying this property for requests that target data streams with backing indices across multiple data tiers. + * + * If set to `0` (default), the query does not terminate early. */ + terminate_after?: long + /** The period of time to wait for a response from each shard. + * If no response is received before the timeout expires, the request fails and returns an error. + * Defaults to no timeout. */ + timeout?: string + /** If `true`, calculate and return document scores, even if the scores are not used for sorting. */ + track_scores?: boolean + /** If `true`, the request returns the document version as part of a hit. */ + version?: boolean + /** If `true`, the request returns sequence number and primary term of the last modification of each hit. */ + seq_no_primary_term?: boolean + /** A comma-separated list of stored fields to return as part of a hit. + * If no fields are specified, no stored fields are included in the response. + * If this field is specified, the `_source` property defaults to `false`. + * You can pass `_source: true` to return both source fields and stored fields in the search response. */ + stored_fields?: Fields + /** Limit the search to a point in time (PIT). + * If you provide a PIT, you cannot specify an `` in the request path. */ + pit?: SearchPointInTimeReference + /** One or more runtime fields in the search request. + * These fields take precedence over mapped fields with the same name. */ + runtime_mappings?: MappingRuntimeFields + /** The stats groups to associate with the search. + * Each group maintains a statistics aggregation for its associated searches. + * You can retrieve these stats using the indices stats API. */ + stats?: string[] +} + export interface SearchShardProfile { aggregations: SearchAggregationProfile[] cluster: string @@ -2217,8 +2958,11 @@ export interface SearchShardProfile { } export interface SearchSmoothingModelContainer { + /** A smoothing model that uses an additive smoothing where a constant (typically `1.0` or smaller) is added to all counts to balance weights. */ laplace?: SearchLaplaceSmoothingModel + /** A smoothing model that takes the weighted mean of the unigrams, bigrams, and trigrams based on user supplied weights (lambdas). */ linear_interpolation?: SearchLinearInterpolationSmoothingModel + /** A simple backoff model that backs off to lower order n-gram models if the higher order count is `0` and discounts the lower order n-gram model by a constant factor. */ stupid_backoff?: SearchStupidBackoffSmoothingModel } @@ -2228,14 +2972,17 @@ export type SearchSourceConfigParam = boolean | Fields export interface SearchSourceFilter { excludes?: Fields + /** @alias excludes */ exclude?: Fields includes?: Fields + /** @alias includes */ include?: Fields } export type SearchStringDistance = 'internal' | 'damerau_levenshtein' | 'levenshtein' | 'jaro_winkler' | 'ngram' export interface SearchStupidBackoffSmoothingModel { + /** A constant factor that the lower order n-gram model is discounted by. 
*/ discount: double } @@ -2248,24 +2995,36 @@ export interface SearchSuggestBase { } export interface SearchSuggestFuzziness { + /** The fuzziness factor. */ fuzziness?: Fuzziness + /** Minimum length of the input before fuzzy suggestions are returned. */ min_length?: integer + /** Minimum length of the input, which is not checked for fuzzy alternatives. */ prefix_length?: integer + /** If set to `true`, transpositions are counted as one change instead of two. */ transpositions?: boolean + /** If `true`, all measurements (like fuzzy edit distance, transpositions, and lengths) are measured in Unicode code points instead of in bytes. + * This is slightly slower than raw bytes. */ unicode_aware?: boolean } export type SearchSuggestSort = 'score' | 'frequency' export interface SearchSuggesterKeys { + /** Global suggest text, to avoid repetition when the same text is used in several suggesters */ text?: string } export type SearchSuggester = SearchSuggesterKeys & { [property: string]: SearchFieldSuggester | string } export interface SearchSuggesterBase { + /** The field to fetch the candidate suggestions from. + * Needs to be set globally or per suggestion. */ field: Field + /** The analyzer to analyze the suggest text with. + * Defaults to the search analyzer of the suggest field. */ analyzer?: string + /** The maximum corrections to be returned per suggest text token. */ size?: integer } @@ -2283,16 +3042,36 @@ export interface SearchTermSuggestOption { export interface SearchTermSuggester extends SearchSuggesterBase { lowercase_terms?: boolean + /** The maximum edit distance candidate suggestions can have in order to be considered as a suggestion. + * Can only be `1` or `2`. */ max_edits?: integer + /** A factor that is used to multiply with the shard_size in order to inspect more candidate spelling corrections on the shard level. + * Can improve accuracy at the cost of performance. */ max_inspections?: integer + /** The maximum threshold in number of documents in which a suggest text token can exist in order to be included. + * Can be a relative percentage number (for example `0.4`) or an absolute number to represent document frequencies. + * If a value higher than 1 is specified, then fractional can not be specified. */ max_term_freq?: float + /** The minimal threshold in number of documents a suggestion should appear in. + * This can improve quality by only suggesting high frequency terms. + * Can be specified as an absolute number or as a relative percentage of number of documents. + * If a value higher than 1 is specified, then the number cannot be fractional. */ min_doc_freq?: float + /** The minimum length a suggest text term must have in order to be included. */ min_word_length?: integer + /** The number of minimal prefix characters that must match in order to be a candidate for suggestions. + * Increasing this number improves spellcheck performance. */ prefix_length?: integer + /** Sets the maximum number of suggestions to be retrieved from each individual shard. */ shard_size?: integer + /** Defines how suggestions should be sorted per suggest text term. */ sort?: SearchSuggestSort + /** The string distance implementation to use for comparing how similar suggested terms are. */ string_distance?: SearchStringDistance + /** Controls what suggestions are included or controls for what suggest text terms, suggestions should be suggested. */ suggest_mode?: SuggestMode + /** The suggest text. + * Needs to be set globally or per suggestion.
*/ text?: string } @@ -2306,7 +3085,7 @@ export type SearchTotalHitsRelation = 'eq' | 'gte' export type SearchTrackHits = boolean | integer export interface SearchMvtRequest extends RequestBase { -/** Comma-separated list of data streams, indices, or aliases to search */ + /** Comma-separated list of data streams, indices, or aliases to search */ index: Indices /** Field containing geospatial data to return */ field: Field @@ -2316,33 +3095,78 @@ export interface SearchMvtRequest extends RequestBase { x: SearchMvtCoordinate /** Y coordinate for the vector tile to search */ y: SearchMvtCoordinate - /** Sub-aggregations for the geotile_grid. It supports the following aggregation types: - `avg` - `boxplot` - `cardinality` - `extended stats` - `max` - `median absolute deviation` - `min` - `percentile` - `percentile-rank` - `stats` - `sum` - `value count` The aggregation names can't start with `_mvt_`. The `_mvt_` prefix is reserved for internal aggregations. */ + /** Sub-aggregations for the geotile_grid. + * + * It supports the following aggregation types: + * + * - `avg` + * - `boxplot` + * - `cardinality` + * - `extended stats` + * - `max` + * - `median absolute deviation` + * - `min` + * - `percentile` + * - `percentile-rank` + * - `stats` + * - `sum` + * - `value count` + * + * The aggregation names can't start with `_mvt_`. The `_mvt_` prefix is reserved for internal aggregations. */ aggs?: Record - /** The size, in pixels, of a clipping buffer outside the tile. This allows renderers to avoid outline artifacts from geometries that extend past the extent of the tile. */ + /** The size, in pixels, of a clipping buffer outside the tile. This allows renderers + * to avoid outline artifacts from geometries that extend past the extent of the tile. */ buffer?: integer - /** If `false`, the meta layer's feature is the bounding box of the tile. If `true`, the meta layer's feature is a bounding box resulting from a `geo_bounds` aggregation. The aggregation runs on values that intersect the `//` tile with `wrap_longitude` set to `false`. The resulting bounding box may be larger than the vector tile. */ + /** If `false`, the meta layer's feature is the bounding box of the tile. + * If `true`, the meta layer's feature is a bounding box resulting from a + * `geo_bounds` aggregation. The aggregation runs on values that intersect + * the `//` tile with `wrap_longitude` set to `false`. The resulting + * bounding box may be larger than the vector tile. */ exact_bounds?: boolean /** The size, in pixels, of a side of the tile. Vector tiles are square with equal sides. */ extent?: integer - /** The fields to return in the `hits` layer. It supports wildcards (`*`). This parameter does not support fields with array values. Fields with array values may return inconsistent results. */ + /** The fields to return in the `hits` layer. + * It supports wildcards (`*`). + * This parameter does not support fields with array values. Fields with array + * values may return inconsistent results. */ fields?: Fields /** The aggregation used to create a grid for the `field`. */ grid_agg?: SearchMvtGridAggregationType - /** Additional zoom levels available through the aggs layer. For example, if `` is `7` and `grid_precision` is `8`, you can zoom in up to level 15. Accepts 0-8. If 0, results don't include the aggs layer. */ + /** Additional zoom levels available through the aggs layer. For example, if `` is `7` + * and `grid_precision` is `8`, you can zoom in up to level 15. Accepts 0-8. 
If 0, results + * don't include the aggs layer. */ grid_precision?: integer - /** Determines the geometry type for features in the aggs layer. In the aggs layer, each feature represents a `geotile_grid` cell. If `grid, each feature is a polygon of the cells bounding box. If `point`, each feature is a Point that is the centroid of the cell. */ + /** Determines the geometry type for features in the aggs layer. In the aggs layer, + * each feature represents a `geotile_grid` cell. If `grid, each feature is a polygon + * of the cells bounding box. If `point`, each feature is a Point that is the centroid + * of the cell. */ grid_type?: SearchMvtGridType /** The query DSL used to filter documents for the search. */ query?: QueryDslQueryContainer - /** Defines one or more runtime fields in the search request. These fields take precedence over mapped fields with the same name. */ + /** Defines one or more runtime fields in the search request. These fields take + * precedence over mapped fields with the same name. */ runtime_mappings?: MappingRuntimeFields - /** The maximum number of features to return in the hits layer. Accepts 0-10000. If 0, results don't include the hits layer. */ + /** The maximum number of features to return in the hits layer. Accepts 0-10000. + * If 0, results don't include the hits layer. */ size?: integer - /** Sort the features in the hits layer. By default, the API calculates a bounding box for each feature. It sorts features based on this box's diagonal length, from longest to shortest. */ + /** Sort the features in the hits layer. By default, the API calculates a bounding + * box for each feature. It sorts features based on this box's diagonal length, + * from longest to shortest. */ sort?: Sort - /** The number of hits matching the query to count accurately. If `true`, the exact number of hits is returned at the cost of some performance. If `false`, the response does not include the total number of hits matching the query. */ + /** The number of hits matching the query to count accurately. If `true`, the exact number + * of hits is returned at the cost of some performance. If `false`, the response does + * not include the total number of hits matching the query. */ track_total_hits?: SearchTrackHits - /** If `true`, the hits and aggs layers will contain additional point features representing suggested label positions for the original features. * `Point` and `MultiPoint` features will have one of the points selected. * `Polygon` and `MultiPolygon` features will have a single point generated, either the centroid, if it is within the polygon, or another point within the polygon selected from the sorted triangle-tree. * `LineString` features will likewise provide a roughly central point selected from the triangle-tree. * The aggregation results will provide one central point for each aggregation bucket. All attributes from the original features will also be copied to the new label features. In addition, the new features will be distinguishable using the tag `_mvt_label_position`. */ + /** If `true`, the hits and aggs layers will contain additional point features representing + * suggested label positions for the original features. + * + * * `Point` and `MultiPoint` features will have one of the points selected. + * * `Polygon` and `MultiPolygon` features will have a single point generated, either the centroid, if it is within the polygon, or another point within the polygon selected from the sorted triangle-tree. 
+ * * `LineString` features will likewise provide a roughly central point selected from the triangle-tree. + * * The aggregation results will provide one central point for each aggregation bucket. + * + * All attributes from the original features will also be copied to the new label features. + * In addition, the new features will be distinguishable using the tag `_mvt_label_position`. */ with_labels?: boolean /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { index?: never, field?: never, zoom?: never, x?: never, y?: never, aggs?: never, buffer?: never, exact_bounds?: never, extent?: never, fields?: never, grid_agg?: never, grid_precision?: never, grid_type?: never, query?: never, runtime_mappings?: never, size?: never, sort?: never, track_total_hits?: never, with_labels?: never } @@ -2361,19 +3185,29 @@ export type SearchMvtGridType = 'grid' | 'point' | 'centroid' export type SearchMvtZoomLevel = integer export interface SearchShardsRequest extends RequestBase { -/** A comma-separated list of data streams, indices, and aliases to search. It supports wildcards (`*`). To search all data streams and indices, omit this parameter or use `*` or `_all`. */ + /** A comma-separated list of data streams, indices, and aliases to search. + * It supports wildcards (`*`). + * To search all data streams and indices, omit this parameter or use `*` or `_all`. */ index?: Indices - /** If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. */ + /** If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. + * This behavior applies even if the request targets other open indices. + * For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. */ allow_no_indices?: boolean - /** Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. */ + /** Type of index that wildcard patterns can match. + * If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. + * Supports comma-separated values, such as `open,hidden`. + * Valid values are: `all`, `open`, `closed`, `hidden`, `none`. */ expand_wildcards?: ExpandWildcards /** If `false`, the request returns an error if it targets a missing or closed index. */ ignore_unavailable?: boolean /** If `true`, the request retrieves information from the local node only. */ local?: boolean - /** The period to wait for a connection to the master node. If the master node is not available before the timeout expires, the request fails and returns an error. IT can also be set to `-1` to indicate that the request should never timeout. */ + /** The period to wait for a connection to the master node. + * If the master node is not available before the timeout expires, the request fails and returns an error. + * It can also be set to `-1` to indicate that the request should never timeout.
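A minimal sketch of the vector tile search described above, reusing the `client` instance from the earlier sketch; the `museums` index, its geo field `location`, and the tile coordinates are assumptions, and the response is a binary Mapbox vector tile:

import { writeFileSync } from 'node:fs'

const tile = await client.searchMvt({
  index: 'museums',
  field: 'location',
  zoom: 13,
  x: 4207,
  y: 2692,
  grid_precision: 2,
  fields: ['name']                  // extra fields to copy into the hits layer
})
// Persist the raw tile; a map client such as Mapbox GL can render it directly.
writeFileSync('tile.mvt', Buffer.from(tile))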
*/ master_timeout?: Duration - /** The node or shard the operation should be performed on. It is random by default. */ + /** The node or shard the operation should be performed on. + * It is random by default. */ preference?: string /** A custom value used to route operations to a specific shard. */ routing?: Routing @@ -2390,10 +3224,14 @@ export interface SearchShardsResponse { } export interface SearchShardsSearchShardsNodeAttributes { + /** The human-readable identifier of the node. */ name: NodeName + /** The ephemeral ID of the node. */ ephemeral_id: Id + /** The host and port where transport HTTP connections are accepted. */ transport_address: TransportAddress external_id: string + /** Lists node attributes. */ attributes: Record roles: NodeRoles version: VersionString @@ -2407,40 +3245,55 @@ export interface SearchShardsShardStoreIndex { } export interface SearchTemplateRequest extends RequestBase { -/** A comma-separated list of data streams, indices, and aliases to search. It supports wildcards (`*`). */ + /** A comma-separated list of data streams, indices, and aliases to search. + * It supports wildcards (`*`). */ index?: Indices - /** If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. */ + /** If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. + * This behavior applies even if the request targets other open indices. + * For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. */ allow_no_indices?: boolean /** If `true`, network round-trips are minimized for cross-cluster search requests. */ ccs_minimize_roundtrips?: boolean - /** The type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. */ + /** The type of index that wildcard patterns can match. + * If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. + * Supports comma-separated values, such as `open,hidden`. + * Valid values are: `all`, `open`, `closed`, `hidden`, `none`. */ expand_wildcards?: ExpandWildcards /** If `true`, specified concrete, expanded, or aliased indices are not included in the response when throttled. */ ignore_throttled?: boolean /** If `false`, the request returns an error if it targets a missing or closed index. */ ignore_unavailable?: boolean - /** The node or shard the operation should be performed on. It is random by default. */ + /** The node or shard the operation should be performed on. + * It is random by default. */ preference?: string /** A custom value used to route operations to a specific shard. */ routing?: Routing - /** Specifies how long a consistent view of the index should be maintained for scrolled search. */ + /** Specifies how long a consistent view of the index + * should be maintained for scrolled search. */ scroll?: Duration /** The type of the search operation. 
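// Illustrative only: the search_shards request described above, exercised through the
// client. The index pattern and routing value are assumptions, not part of this diff.
import type { Client } from '@elastic/elasticsearch'

async function whichShards (client: Client) {
  const resp = await client.searchShards({
    index: 'my-index-*',
    routing: 'user-42',     // custom routing value
    local: true,            // retrieve information from the local node only
    master_timeout: '30s'   // -1 would mean "never time out"
  })
  console.log(resp.shards)
}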
*/ search_type?: SearchType - /** If `true`, `hits.total` is rendered as an integer in the response. If `false`, it is rendered as an object. */ + /** If `true`, `hits.total` is rendered as an integer in the response. + * If `false`, it is rendered as an object. */ rest_total_hits_as_int?: boolean /** If `true`, the response prefixes aggregation and suggester names with their respective types. */ typed_keys?: boolean - /** If `true`, returns detailed information about score calculation as part of each hit. If you specify both this and the `explain` query parameter, the API uses only the query parameter. */ + /** If `true`, returns detailed information about score calculation as part of each hit. + * If you specify both this and the `explain` query parameter, the API uses only the query parameter. */ explain?: boolean - /** The ID of the search template to use. If no `source` is specified, this parameter is required. */ + /** The ID of the search template to use. If no `source` is specified, + * this parameter is required. */ id?: Id - /** Key-value pairs used to replace Mustache variables in the template. The key is the variable name. The value is the variable value. */ + /** Key-value pairs used to replace Mustache variables in the template. + * The key is the variable name. + * The value is the variable value. */ params?: Record /** If `true`, the query execution is profiled. */ profile?: boolean - /** An inline search template. Supports the same parameters as the search API's request body. It also supports Mustache variables. If no `id` is specified, this parameter is required. */ - source?: string + /** An inline search template. Supports the same parameters as the search API's + * request body. It also supports Mustache variables. If no `id` is specified, this + * parameter is required. */ + source?: ScriptSource /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { index?: never, allow_no_indices?: never, ccs_minimize_roundtrips?: never, expand_wildcards?: never, ignore_throttled?: never, ignore_unavailable?: never, preference?: never, routing?: never, scroll?: never, search_type?: never, rest_total_hits_as_int?: never, typed_keys?: never, explain?: never, id?: never, params?: never, profile?: never, source?: never } /** All values in `querystring` will be added to the request querystring. */ @@ -2465,21 +3318,29 @@ export interface SearchTemplateResponse { } export interface TermsEnumRequest extends RequestBase { -/** A comma-separated list of data streams, indices, and index aliases to search. Wildcard (`*`) expressions are supported. To search all data streams or indices, omit this parameter or use `*` or `_all`. */ + /** A comma-separated list of data streams, indices, and index aliases to search. + * Wildcard (`*`) expressions are supported. + * To search all data streams or indices, omit this parameter or use `*` or `_all`. */ index: IndexName /** The string to match at the start of indexed terms. If not provided, all terms in the field are considered. */ field: Field /** The number of matching terms to return. */ size?: integer - /** The maximum length of time to spend collecting results. If the timeout is exceeded the `complete` flag set to `false` in the response and the results may be partial or empty. */ + /** The maximum length of time to spend collecting results. + * If the timeout is exceeded the `complete` flag set to `false` in the response and the results may be partial or empty. 
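// Illustrative only: running a stored search template with the parameters described above.
// The template id and Mustache variables are assumptions, not part of this diff.
import type { Client } from '@elastic/elasticsearch'

async function runTemplate (client: Client) {
  const resp = await client.searchTemplate({
    index: 'my-index',
    id: 'my-search-template',   // required when no inline `source` is given
    params: {                   // Mustache variables: key is the name, value is the value
      query_string: 'hello world',
      from: 0,
      size: 10
    }
  })
  console.log(resp.hits.hits)
}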
*/ timeout?: Duration /** When `true`, the provided search string is matched against index terms without case sensitivity. */ case_insensitive?: boolean /** Filter an index shard if the provided query rewrites to `match_none`. */ index_filter?: QueryDslQueryContainer - /** The string to match at the start of indexed terms. If it is not provided, all terms in the field are considered. > info > The prefix string cannot be larger than the largest possible keyword value, which is Lucene's term byte-length limit of 32766. */ + /** The string to match at the start of indexed terms. + * If it is not provided, all terms in the field are considered. + * + * > info + * > The prefix string cannot be larger than the largest possible keyword value, which is Lucene's term byte-length limit of 32766. */ string?: string - /** The string after which terms in the index should be returned. It allows for a form of pagination if the last result from one request is passed as the `search_after` parameter for a subsequent request. */ + /** The string after which terms in the index should be returned. + * It allows for a form of pagination if the last result from one request is passed as the `search_after` parameter for a subsequent request. */ search_after?: string /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { index?: never, field?: never, size?: never, timeout?: never, case_insensitive?: never, index_filter?: never, string?: never, search_after?: never } @@ -2490,6 +3351,8 @@ export interface TermsEnumRequest extends RequestBase { export interface TermsEnumResponse { _shards: ShardStatistics terms: string[] + /** If `false`, the returned terms set may be incomplete and should be treated as approximate. + * This can occur due to a few reasons, such as a request timeout or a node error. */ complete: boolean } @@ -2500,23 +3363,53 @@ export interface TermvectorsFieldStatistics { } export interface TermvectorsFilter { + /** Ignore words which occur in more than this many docs. + * Defaults to unbounded. */ max_doc_freq?: integer + /** The maximum number of terms that must be returned per field. */ max_num_terms?: integer + /** Ignore words with more than this frequency in the source doc. + * It defaults to unbounded. */ max_term_freq?: integer + /** The maximum word length above which words will be ignored. + * Defaults to unbounded. */ max_word_length?: integer + /** Ignore terms which do not occur in at least this many docs. */ min_doc_freq?: integer + /** Ignore words with less than this frequency in the source doc. */ min_term_freq?: integer + /** The minimum word length below which words will be ignored. */ min_word_length?: integer } export interface TermvectorsRequest extends RequestBase { -/** The name of the index that contains the document. */ + /** The name of the index that contains the document. */ index: IndexName /** A unique identifier for the document. */ id?: Id - /** A comma-separated list or wildcard expressions of fields to include in the statistics. It is used as the default list unless a specific field list is provided in the `completion_fields` or `fielddata_fields` parameters. */ + /** The node or shard the operation should be performed on. + * It is random by default. */ + preference?: string + /** If true, the request is real-time as opposed to near-real-time. */ + realtime?: boolean + /** An artificial document (a document not present in the index) for which you want to retrieve term vectors. 
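// Illustrative only: paginating through index terms with terms_enum, using the
// `search_after` and `complete` fields described above. Index and field names are assumptions.
import type { Client } from '@elastic/elasticsearch'

async function listTags (client: Client) {
  let after: string | undefined
  do {
    const resp = await client.termsEnum({
      index: 'stackoverflow',
      field: 'tags',
      string: 'kib',          // prefix to match; case_insensitive defaults to false
      size: 100,
      search_after: after
    })
    if (!resp.complete) console.warn('result may be partial (timeout or node error)')
    console.log(resp.terms)
    after = resp.terms[resp.terms.length - 1]   // last term becomes the next page cursor
  } while (after !== undefined)
}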
*/ + doc?: TDocument + /** Filter terms based on their tf-idf scores. + * This could be useful in order find out a good characteristic vector of a document. + * This feature works in a similar manner to the second phase of the More Like This Query. */ + filter?: TermvectorsFilter + /** Override the default per-field analyzer. + * This is useful in order to generate term vectors in any fashion, especially when using artificial documents. + * When providing an analyzer for a field that already stores term vectors, the term vectors will be regenerated. */ + per_field_analyzer?: Record + /** A list of fields to include in the statistics. + * It is used as the default list unless a specific field list is provided in the `completion_fields` or `fielddata_fields` parameters. */ fields?: Fields - /** If `true`, the response includes: * The document count (how many documents contain this field). * The sum of document frequencies (the sum of document frequencies for all terms in this field). * The sum of total term frequencies (the sum of total term frequencies of each term in this field). */ + /** If `true`, the response includes: + * + * * The document count (how many documents contain this field). + * * The sum of document frequencies (the sum of document frequencies for all terms in this field). + * * The sum of total term frequencies (the sum of total term frequencies of each term in this field). */ field_statistics?: boolean /** If `true`, the response includes term offsets. */ offsets?: boolean @@ -2524,28 +3417,23 @@ export interface TermvectorsRequest extends RequestBase { payloads?: boolean /** If `true`, the response includes term positions. */ positions?: boolean - /** The node or shard the operation should be performed on. It is random by default. */ - preference?: string - /** If true, the request is real-time as opposed to near-real-time. */ - realtime?: boolean + /** If `true`, the response includes: + * + * * The total term frequency (how often a term occurs in all documents). + * * The document frequency (the number of documents containing the current term). + * + * By default these values are not returned since term statistics can have a serious performance impact. */ + term_statistics?: boolean /** A custom value that is used to route operations to a specific shard. */ routing?: Routing - /** If `true`, the response includes: * The total term frequency (how often a term occurs in all documents). * The document frequency (the number of documents containing the current term). By default these values are not returned since term statistics can have a serious performance impact. */ - term_statistics?: boolean /** If `true`, returns the document version as part of a hit. */ version?: VersionNumber /** The version type. */ version_type?: VersionType - /** An artificial document (a document not present in the index) for which you want to retrieve term vectors. */ - doc?: TDocument - /** Filter terms based on their tf-idf scores. This could be useful in order find out a good characteristic vector of a document. This feature works in a similar manner to the second phase of the More Like This Query. */ - filter?: TermvectorsFilter - /** Override the default per-field analyzer. This is useful in order to generate term vectors in any fashion, especially when using artificial documents. When providing an analyzer for a field that already stores term vectors, the term vectors will be regenerated. */ - per_field_analyzer?: Record /** All values in `body` will be added to the request body. 
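// Illustrative only: requesting term vectors for an artificial document, combining the
// `doc`, `per_field_analyzer`, `filter` and statistics flags documented above. The index
// and document shape are assumptions, not part of this diff.
import type { Client } from '@elastic/elasticsearch'

async function inspectTerms (client: Client) {
  const resp = await client.termvectors({
    index: 'articles',
    doc: { title: 'Term vectors in Elasticsearch', body: 'A quick example body' },
    fields: ['title', 'body'],
    per_field_analyzer: { body: 'standard' },  // regenerate vectors with this analyzer
    term_statistics: true,                     // off by default: can be expensive
    field_statistics: true,
    filter: { max_num_terms: 25, min_doc_freq: 1 }
  })
  console.log(resp.term_vectors)
}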
*/ - body?: string | { [key: string]: any } & { index?: never, id?: never, fields?: never, field_statistics?: never, offsets?: never, payloads?: never, positions?: never, preference?: never, realtime?: never, routing?: never, term_statistics?: never, version?: never, version_type?: never, doc?: never, filter?: never, per_field_analyzer?: never } + body?: string | { [key: string]: any } & { index?: never, id?: never, preference?: never, realtime?: never, doc?: never, filter?: never, per_field_analyzer?: never, fields?: never, field_statistics?: never, offsets?: never, payloads?: never, positions?: never, term_statistics?: never, routing?: never, version?: never, version_type?: never } /** All values in `querystring` will be added to the request querystring. */ - querystring?: { [key: string]: any } & { index?: never, id?: never, fields?: never, field_statistics?: never, offsets?: never, payloads?: never, positions?: never, preference?: never, realtime?: never, routing?: never, term_statistics?: never, version?: never, version_type?: never, doc?: never, filter?: never, per_field_analyzer?: never } + querystring?: { [key: string]: any } & { index?: never, id?: never, preference?: never, realtime?: never, doc?: never, filter?: never, per_field_analyzer?: never, fields?: never, field_statistics?: never, offsets?: never, payloads?: never, positions?: never, term_statistics?: never, routing?: never, version?: never, version_type?: never } } export interface TermvectorsResponse { @@ -2578,9 +3466,10 @@ export interface TermvectorsToken { } export interface UpdateRequest extends RequestBase { -/** A unique identifier for the document to be updated. */ + /** A unique identifier for the document to be updated. */ id: Id - /** The name of the target index. By default, the index is created automatically if it doesn't exist. */ + /** The name of the target index. + * By default, the index is created automatically if it doesn't exist. */ index: IndexName /** Only perform the operation if the document has this primary term. */ if_primary_term?: long @@ -2590,7 +3479,9 @@ export interface UpdateRequest include_source_on_error?: boolean /** The script language. */ lang?: string - /** If 'true', Elasticsearch refreshes the affected shards to make this operation visible to search. If 'wait_for', it waits for a refresh to make this operation visible to search. If 'false', it does nothing with refreshes. */ + /** If 'true', Elasticsearch refreshes the affected shards to make this operation visible to search. + * If 'wait_for', it waits for a refresh to make this operation visible to search. + * If 'false', it does nothing with refreshes. */ refresh?: Refresh /** If `true`, the destination must be an index alias. */ require_alias?: boolean @@ -2598,9 +3489,13 @@ export interface UpdateRequest retry_on_conflict?: integer /** A custom value used to route operations to a specific shard. */ routing?: Routing - /** The period to wait for the following operations: dynamic mapping updates and waiting for active shards. Elasticsearch waits for at least the timeout period before failing. The actual wait time could be longer, particularly when multiple waits occur. */ + /** The period to wait for the following operations: dynamic mapping updates and waiting for active shards. + * Elasticsearch waits for at least the timeout period before failing. + * The actual wait time could be longer, particularly when multiple waits occur. 
*/ timeout?: Duration - /** The number of copies of each shard that must be active before proceeding with the operation. Set to 'all' or any positive integer up to the total number of shards in the index (`number_of_replicas`+1). The default value of `1` means it waits for each primary shard to be active. */ + /** The number of copies of each shard that must be active before proceeding with the operation. + * Set to 'all' or any positive integer up to the total number of shards in the index (`number_of_replicas`+1). + * The default value of `1` means it waits for each primary shard to be active. */ wait_for_active_shards?: WaitForActiveShards /** The source fields you want to exclude. */ _source_excludes?: Fields @@ -2608,17 +3503,21 @@ export interface UpdateRequest _source_includes?: Fields /** If `true`, the `result` in the response is set to `noop` (no operation) when there are no changes to the document. */ detect_noop?: boolean - /** A partial update to an existing document. If both `doc` and `script` are specified, `doc` is ignored. */ + /** A partial update to an existing document. + * If both `doc` and `script` are specified, `doc` is ignored. */ doc?: TPartialDocument - /** If `true`, use the contents of 'doc' as the value of 'upsert'. NOTE: Using ingest pipelines with `doc_as_upsert` is not supported. */ + /** If `true`, use the contents of 'doc' as the value of 'upsert'. + * NOTE: Using ingest pipelines with `doc_as_upsert` is not supported. */ doc_as_upsert?: boolean /** The script to run to update the document. */ - script?: Script | string + script?: Script | ScriptSource /** If `true`, run the script whether or not the document exists. */ scripted_upsert?: boolean - /** If `false`, turn off source retrieval. You can also specify a comma-separated list of the fields you want to retrieve. */ + /** If `false`, turn off source retrieval. + * You can also specify a comma-separated list of the fields you want to retrieve. */ _source?: SearchSourceConfig - /** If the document does not already exist, the contents of 'upsert' are inserted as a new document. If the document exists, the 'script' is run. */ + /** If the document does not already exist, the contents of 'upsert' are inserted as a new document. + * If the document exists, the 'script' is run. */ upsert?: TDocument /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { id?: never, index?: never, if_primary_term?: never, if_seq_no?: never, include_source_on_error?: never, lang?: never, refresh?: never, require_alias?: never, retry_on_conflict?: never, routing?: never, timeout?: never, wait_for_active_shards?: never, _source_excludes?: never, _source_includes?: never, detect_noop?: never, doc?: never, doc_as_upsert?: never, script?: never, scripted_upsert?: never, _source?: never, upsert?: never } @@ -2633,35 +3532,52 @@ export interface UpdateUpdateWriteResponseBase extends Writ } export interface UpdateByQueryRequest extends RequestBase { -/** A comma-separated list of data streams, indices, and aliases to search. It supports wildcards (`*`). To search all data streams or indices, omit this parameter or use `*` or `_all`. */ + /** A comma-separated list of data streams, indices, and aliases to search. + * It supports wildcards (`*`). + * To search all data streams or indices, omit this parameter or use `*` or `_all`. 
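// Illustrative only: a scripted counter update using the `script`, `upsert` and
// `retry_on_conflict` options described above. With the type change in this diff the script
// source is typed as ScriptSource. Index, id and field names are assumptions.
import type { Client } from '@elastic/elasticsearch'

async function bumpCounter (client: Client) {
  await client.update({
    index: 'page-views',
    id: 'home',
    retry_on_conflict: 3,
    script: {
      lang: 'painless',
      source: 'ctx._source.views += params.inc',
      params: { inc: 1 }
    },
    upsert: { views: 1 },   // inserted only if the document does not exist yet
    refresh: 'wait_for'     // make the change visible to search before returning
  })
}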
*/ index: Indices - /** If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. */ + /** If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. + * This behavior applies even if the request targets other open indices. + * For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. */ allow_no_indices?: boolean - /** The analyzer to use for the query string. This parameter can be used only when the `q` query string parameter is specified. */ + /** The analyzer to use for the query string. + * This parameter can be used only when the `q` query string parameter is specified. */ analyzer?: string - /** If `true`, wildcard and prefix queries are analyzed. This parameter can be used only when the `q` query string parameter is specified. */ + /** If `true`, wildcard and prefix queries are analyzed. + * This parameter can be used only when the `q` query string parameter is specified. */ analyze_wildcard?: boolean - /** The default operator for query string query: `AND` or `OR`. This parameter can be used only when the `q` query string parameter is specified. */ + /** The default operator for query string query: `AND` or `OR`. + * This parameter can be used only when the `q` query string parameter is specified. */ default_operator?: QueryDslOperator - /** The field to use as default where no field prefix is given in the query string. This parameter can be used only when the `q` query string parameter is specified. */ + /** The field to use as default where no field prefix is given in the query string. + * This parameter can be used only when the `q` query string parameter is specified. */ df?: string - /** The type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. It supports comma-separated values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. */ + /** The type of index that wildcard patterns can match. + * If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. + * It supports comma-separated values, such as `open,hidden`. + * Valid values are: `all`, `open`, `closed`, `hidden`, `none`. */ expand_wildcards?: ExpandWildcards - /** Starting offset (default: 0) */ + /** Skips the specified number of documents. */ from?: long /** If `false`, the request returns an error if it targets a missing or closed index. */ ignore_unavailable?: boolean - /** If `true`, format-based query failures (such as providing text to a numeric field) in the query string will be ignored. This parameter can be used only when the `q` query string parameter is specified. */ + /** If `true`, format-based query failures (such as providing text to a numeric field) in the query string will be ignored. + * This parameter can be used only when the `q` query string parameter is specified. */ lenient?: boolean - /** The ID of the pipeline to use to preprocess incoming documents. 
If the index has a default ingest pipeline specified, then setting the value to `_none` disables the default ingest pipeline for this request. If a final pipeline is configured it will always run, regardless of the value of this parameter. */ + /** The ID of the pipeline to use to preprocess incoming documents. + * If the index has a default ingest pipeline specified, then setting the value to `_none` disables the default ingest pipeline for this request. + * If a final pipeline is configured it will always run, regardless of the value of this parameter. */ pipeline?: string - /** The node or shard the operation should be performed on. It is random by default. */ + /** The node or shard the operation should be performed on. + * It is random by default. */ preference?: string /** A query in the Lucene query string syntax. */ q?: string - /** If `true`, Elasticsearch refreshes affected shards to make the operation visible to search after the request completes. This is different than the update API's `refresh` parameter, which causes just the shard that received the request to be refreshed. */ + /** If `true`, Elasticsearch refreshes affected shards to make the operation visible to search after the request completes. + * This is different than the update API's `refresh` parameter, which causes just the shard that received the request to be refreshed. */ refresh?: boolean - /** If `true`, the request cache is used for this request. It defaults to the index-level setting. */ + /** If `true`, the request cache is used for this request. + * It defaults to the index-level setting. */ request_cache?: boolean /** The throttle for this request in sub-requests per second. */ requests_per_second?: float @@ -2671,7 +3587,8 @@ export interface UpdateByQueryRequest extends RequestBase { scroll?: Duration /** The size of the scroll request that powers the operation. */ scroll_size?: long - /** An explicit timeout for each search request. By default, there is no timeout. */ + /** An explicit timeout for each search request. + * By default, there is no timeout. */ search_timeout?: Duration /** The type of the search operation. Available options include `query_then_fetch` and `dfs_query_then_fetch`. */ search_type?: SearchType @@ -2681,24 +3598,39 @@ export interface UpdateByQueryRequest extends RequestBase { sort?: string[] /** The specific `tag` of the request for logging and statistical purposes. */ stats?: string[] - /** The maximum number of documents to collect for each shard. If a query reaches this limit, Elasticsearch terminates the query early. Elasticsearch collects documents before sorting. IMPORTANT: Use with caution. Elasticsearch applies this parameter to each shard handling the request. When possible, let Elasticsearch perform early termination automatically. Avoid specifying this parameter for requests that target data streams with backing indices across multiple data tiers. */ + /** The maximum number of documents to collect for each shard. + * If a query reaches this limit, Elasticsearch terminates the query early. + * Elasticsearch collects documents before sorting. + * + * IMPORTANT: Use with caution. + * Elasticsearch applies this parameter to each shard handling the request. + * When possible, let Elasticsearch perform early termination automatically. + * Avoid specifying this parameter for requests that target data streams with backing indices across multiple data tiers. 
*/ terminate_after?: long - /** The period each update request waits for the following operations: dynamic mapping updates, waiting for active shards. By default, it is one minute. This guarantees Elasticsearch waits for at least the timeout before failing. The actual wait time could be longer, particularly when multiple waits occur. */ + /** The period each update request waits for the following operations: dynamic mapping updates, waiting for active shards. + * By default, it is one minute. + * This guarantees Elasticsearch waits for at least the timeout before failing. + * The actual wait time could be longer, particularly when multiple waits occur. */ timeout?: Duration /** If `true`, returns the document version as part of a hit. */ version?: boolean /** Should the document increment the version number (internal) on hit or not (reindex) */ version_type?: boolean - /** The number of shard copies that must be active before proceeding with the operation. Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). The `timeout` parameter controls how long each write request waits for unavailable shards to become available. Both work exactly the way they work in the bulk API. */ + /** The number of shard copies that must be active before proceeding with the operation. + * Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). + * The `timeout` parameter controls how long each write request waits for unavailable shards to become available. + * Both work exactly the way they work in the bulk API. */ wait_for_active_shards?: WaitForActiveShards - /** If `true`, the request blocks until the operation is complete. If `false`, Elasticsearch performs some preflight checks, launches the request, and returns a task ID that you can use to cancel or get the status of the task. Elasticsearch creates a record of this task as a document at `.tasks/task/${taskId}`. */ + /** If `true`, the request blocks until the operation is complete. + * If `false`, Elasticsearch performs some preflight checks, launches the request, and returns a task ID that you can use to cancel or get the status of the task. + * Elasticsearch creates a record of this task as a document at `.tasks/task/${taskId}`. */ wait_for_completion?: boolean /** The maximum number of documents to update. */ max_docs?: long /** The documents to update using the Query DSL. */ query?: QueryDslQueryContainer /** The script to run to update the document source or metadata when updating. */ - script?: Script | string + script?: Script | ScriptSource /** Slice the request manually using the provided slice ID and total number of slices. */ slice?: SlicedScroll /** The preferred behavior when update by query hits version conflicts: `abort` or `proceed`. */ @@ -2710,28 +3642,49 @@ export interface UpdateByQueryRequest extends RequestBase { } export interface UpdateByQueryResponse { + /** The number of scroll responses pulled back by the update by query. */ batches?: long + /** Array of failures if there were any unrecoverable errors during the process. + * If this is non-empty then the request ended because of those failures. + * Update by query is implemented using batches. + * Any failure causes the entire process to end, but all failures in the current batch are collected into the array. + * You can use the `conflicts` option to prevent reindex from ending when version conflicts occur. 
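// Illustrative only: an update-by-query launched asynchronously, using the `conflicts`,
// `script` and `wait_for_completion` options documented above. Index, query and script
// contents are assumptions, not part of this diff.
import type { Client } from '@elastic/elasticsearch'

async function retagDocuments (client: Client) {
  const resp = await client.updateByQuery({
    index: 'articles',
    conflicts: 'proceed',                 // don't abort on version conflicts
    query: { term: { status: 'draft' } },
    script: { source: "ctx._source.status = 'review'" },
    wait_for_completion: false            // returns a task id instead of blocking
  })
  console.log('task:', resp.task)         // poll or cancel via the tasks API
}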
*/ failures?: BulkIndexByScrollFailure[] + /** The number of documents that were ignored because the script used for the update by query returned a noop value for `ctx.op`. */ noops?: long + /** The number of documents that were successfully deleted. */ deleted?: long + /** The number of requests per second effectively run during the update by query. */ requests_per_second?: float + /** The number of retries attempted by update by query. + * `bulk` is the number of bulk actions retried. + * `search` is the number of search actions retried. */ retries?: Retries task?: TaskId + /** If true, some requests timed out during the update by query. */ timed_out?: boolean + /** The number of milliseconds from start to end of the whole operation. */ took?: DurationValue + /** The number of documents that were successfully processed. */ total?: long + /** The number of documents that were successfully updated. */ updated?: long + /** The number of version conflicts that the update by query hit. */ version_conflicts?: long throttled?: Duration + /** The number of milliseconds the request slept to conform to `requests_per_second`. */ throttled_millis?: DurationValue throttled_until?: Duration + /** This field should always be equal to zero in an _update_by_query response. + * It only has meaning when using the task API, where it indicates the next time (in milliseconds since epoch) a throttled request will be run again in order to conform to `requests_per_second`. */ throttled_until_millis?: DurationValue } export interface UpdateByQueryRethrottleRequest extends RequestBase { -/** The ID for the task. */ + /** The ID for the task. */ task_id: Id - /** The throttle for this request in sub-requests per second. To turn off throttling, set it to `-1`. */ + /** The throttle for this request in sub-requests per second. + * To turn off throttling, set it to `-1`. */ requests_per_second?: float /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { task_id?: never, requests_per_second?: never } @@ -2765,6 +3718,7 @@ export type SpecUtilsStringified = T | string export type SpecUtilsWithNullValue = T | SpecUtilsNullValue export interface AcknowledgedResponseBase { + /** For a successful response, this value is always true. On failure, an exception is returned instead. */ acknowledged: boolean } @@ -2775,7 +3729,6 @@ export interface BulkIndexByScrollFailure { id: Id index: IndexName status: integer - type: string } export interface BulkStats { @@ -2824,7 +3777,9 @@ export interface ClusterStatistics { } export interface CompletionStats { + /** Total amount, in bytes, of memory used for completion across all shards assigned to selected nodes. */ size_in_bytes: long + /** Total amount of memory used for completion across all shards assigned to selected nodes. */ size?: ByteSize fields?: Record } @@ -2859,7 +3814,12 @@ export type Distance = string export type DistanceUnit = 'in' | 'ft' | 'yd' | 'mi' | 'nmi' | 'km' | 'm' | 'cm' | 'mm' export interface DocStats { + /** Total number of non-deleted documents across all primary shards assigned to selected nodes. + * This number is based on documents in Lucene segments and may include documents from nested fields. */ count: long + /** Total number of deleted documents across all primary shards assigned to selected nodes. + * This number is based on documents in Lucene segments. + * Elasticsearch reclaims the disk space of deleted Lucene documents when a segment is merged. 
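// Illustrative only: adjusting the throttle of a running update-by-query task, per the
// rethrottle request described above. The task id is an assumption.
import type { Client } from '@elastic/elasticsearch'

async function unthrottle (client: Client, taskId: string) {
  await client.updateByQueryRethrottle({
    task_id: taskId,
    requests_per_second: -1   // -1 turns throttling off entirely
  })
}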
*/ deleted?: long } @@ -2870,14 +3830,25 @@ export type DurationLarge = string export type DurationValue = Unit export interface ElasticsearchVersionInfo { + /** The Elasticsearch Git commit's date. */ build_date: DateTime + /** The build flavor. For example, `default`. */ build_flavor: string + /** The Elasticsearch Git commit's SHA hash. */ build_hash: string + /** Indicates whether the Elasticsearch build was a snapshot. */ build_snapshot: boolean + /** The build type that corresponds to how Elasticsearch was installed. + * For example, `docker`, `rpm`, or `tar`. */ build_type: string + /** The version number of Elasticsearch's underlying Lucene software. */ lucene_version: VersionString + /** The minimum index version with which the responding node can read from disk. */ minimum_index_compatibility_version: VersionString + /** The minimum node version with which the responding node can communicate. + * Also the minimum version from which you can perform a rolling upgrade. */ minimum_wire_compatibility_version: VersionString + /** The Elasticsearch version number. */ number: string } @@ -2894,8 +3865,11 @@ export interface EmptyObject { export type EpochTime = Unit export interface ErrorCauseKeys { + /** The type of error */ type: string + /** A human-readable explanation of the error, in English. */ reason?: string + /** The server stack trace. Present only if the `error_trace=true` parameter was sent with the request. */ stack_trace?: string caused_by?: ErrorCause root_cause?: ErrorCause[] @@ -2909,8 +3883,6 @@ export interface ErrorResponseBase { status: integer } -export type EsqlResult = ArrayBuffer - export type ExpandWildcard = 'all' | 'open' | 'closed' | 'hidden' | 'none' export type ExpandWildcards = ExpandWildcard | ExpandWildcard[] @@ -2985,7 +3957,9 @@ export type GeoHashPrecision = number | string export type GeoHexCell = string export interface GeoLine { + /** Always `"LineString"` */ type: string + /** Array of `[lon, lat]` coordinates */ coordinates: double[][] } @@ -3057,9 +4031,17 @@ export interface IndexingStats { export type Indices = IndexName | IndexName[] export interface IndicesOptions { + /** If false, the request returns an error if any wildcard expression, index alias, or `_all` value targets only + * missing or closed indices. This behavior applies even if the request targets other open indices. For example, + * a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. */ allow_no_indices?: boolean + /** Type of index that wildcard patterns can match. If the request can target data streams, this argument + * determines whether wildcard expressions match hidden data streams. Supports comma-separated values, + * such as `open,hidden`. */ expand_wildcards?: ExpandWildcards + /** If true, missing or closed indices are not included in the response. */ ignore_unavailable?: boolean + /** If true, concrete, expanded or aliased indices are ignored when frozen. */ ignore_throttled?: boolean } @@ -3081,41 +4063,71 @@ export type InlineGet = InlineGetKeys export type Ip = string export interface KnnQuery extends QueryDslQueryBase { + /** The name of the vector field to search against */ field: Field + /** The query vector */ query_vector?: QueryVector + /** The query vector builder. You must provide a query_vector_builder or query_vector, but not both. 
*/ query_vector_builder?: QueryVectorBuilder + /** The number of nearest neighbor candidates to consider per shard */ num_candidates?: integer + /** The final number of nearest neighbors to return as top hits */ k?: integer + /** Filters for the kNN search query */ filter?: QueryDslQueryContainer | QueryDslQueryContainer[] + /** The minimum similarity for a vector to be considered a match */ similarity?: float + /** Apply oversampling and rescoring to quantized vectors * + * @experimental */ rescore_vector?: RescoreVector } export interface KnnRetriever extends RetrieverBase { + /** The name of the vector field to search against. */ field: string + /** Query vector. Must have the same number of dimensions as the vector field you are searching against. You must provide a query_vector_builder or query_vector, but not both. */ query_vector?: QueryVector + /** Defines a model to build a query vector. */ query_vector_builder?: QueryVectorBuilder + /** Number of nearest neighbors to return as top hits. */ k: integer + /** Number of nearest neighbor candidates to consider per shard. */ num_candidates: integer + /** The minimum similarity required for a document to be considered a match. */ similarity?: float + /** Apply oversampling and rescoring to quantized vectors * + * @experimental */ rescore_vector?: RescoreVector } export interface KnnSearch { + /** The name of the vector field to search against */ field: Field + /** The query vector */ query_vector?: QueryVector + /** The query vector builder. You must provide a query_vector_builder or query_vector, but not both. */ query_vector_builder?: QueryVectorBuilder + /** The final number of nearest neighbors to return as top hits */ k?: integer + /** The number of nearest neighbor candidates to consider per shard */ num_candidates?: integer + /** Boost value to apply to kNN scores */ boost?: float + /** Filters for the kNN search query */ filter?: QueryDslQueryContainer | QueryDslQueryContainer[] + /** The minimum similarity for a vector to be considered a match */ similarity?: float + /** If defined, each search hit will contain inner hits. */ inner_hits?: SearchInnerHits + /** Apply oversampling and rescoring to quantized vectors * + * @experimental */ rescore_vector?: RescoreVector } export interface LatLonGeoLocation { + /** Latitude */ lat: double + /** Longitude */ lon: double } @@ -3166,10 +4178,15 @@ export interface NestedSortValue { } export interface NodeAttributes { + /** Lists node attributes. */ attributes: Record + /** The ephemeral ID of the node. */ ephemeral_id: Id + /** The unique identifier of the node. */ id?: NodeId + /** The unique identifier of the node. */ name: NodeName + /** The host and port where transport HTTP connections are accepted. */ transport_address: TransportAddress } @@ -3198,8 +4215,11 @@ export interface NodeShard { export interface NodeStatistics { failures?: ErrorCause[] + /** Total number of nodes selected by the request. */ total: integer + /** Number of nodes that responded successfully to the request. */ successful: integer + /** Number of nodes that rejected the request or failed to respond. If this value is not 0, a reason for the rejection or failure is included in the response. */ failed: integer } @@ -3228,13 +4248,22 @@ export interface PluginStats { export type PropertyName = string export interface QueryCacheStats { + /** Total number of entries added to the query cache across all shards assigned to selected nodes. + * This number includes current and evicted entries. 
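// Illustrative only: a top-level kNN search using the KnnSearch options documented above
// (field, query_vector, k, num_candidates, filter). The index, vector field and vector
// values are assumptions, not part of this diff.
import type { Client } from '@elastic/elasticsearch'

async function findSimilar (client: Client) {
  const resp = await client.search({
    index: 'image-index',
    knn: {
      field: 'image_vector',
      query_vector: [0.12, -0.53, 0.91],      // must match the field's dimensions
      k: 10,
      num_candidates: 100,
      filter: { term: { 'file-type': 'png' } }
    },
    fields: ['title'],
    _source: false
  })
  console.log(resp.hits.hits)
}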
*/ cache_count: long + /** Total number of entries currently in the query cache across all shards assigned to selected nodes. */ cache_size: long + /** Total number of query cache evictions across all shards assigned to selected nodes. */ evictions: long + /** Total count of query cache hits across all shards assigned to selected nodes. */ hit_count: long + /** Total amount of memory used for the query cache across all shards assigned to selected nodes. */ memory_size?: ByteSize + /** Total amount, in bytes, of memory used for the query cache across all shards assigned to selected nodes. */ memory_size_in_bytes: long + /** Total count of query cache misses across all shards assigned to selected nodes. */ miss_count: long + /** Total count of hits and misses in the query cache across all shards assigned to selected nodes. */ total_count: long } @@ -3245,8 +4274,11 @@ export interface QueryVectorBuilder { } export interface RRFRetriever extends RetrieverBase { + /** A list of child retrievers to specify which sets of returned top documents will have the RRF formula applied to them. */ retrievers: RetrieverContainer[] + /** This value determines how much influence documents in individual result sets per query have over the final ranked result set. */ rank_constant?: integer + /** This value determines the size of the individual result sets per query. */ rank_window_size?: integer } @@ -3254,6 +4286,7 @@ export interface RankBase { } export interface RankContainer { + /** The reciprocal rank fusion parameters */ rrf?: RrfRank } @@ -3293,40 +4326,56 @@ export interface RequestCacheStats { } export interface RescoreVector { + /** Applies the specified oversample factor to k on the approximate kNN search */ oversample: float } export type Result = 'created' | 'updated' | 'deleted' | 'not_found' | 'noop' export interface Retries { + /** The number of bulk actions retried. */ bulk: long + /** The number of search actions retried. */ search: long } export interface RetrieverBase { + /** Query to filter the documents that can match. */ filter?: QueryDslQueryContainer | QueryDslQueryContainer[] + /** Minimum _score for matching documents. Documents with a lower _score are not included in the top documents. */ min_score?: float } export interface RetrieverContainer { + /** A retriever that replaces the functionality of a traditional query. */ standard?: StandardRetriever + /** A retriever that replaces the functionality of a knn search. */ knn?: KnnRetriever + /** A retriever that produces top documents from reciprocal rank fusion (RRF). */ rrf?: RRFRetriever + /** A retriever that reranks the top documents based on a reranking model using the InferenceAPI */ text_similarity_reranker?: TextSimilarityReranker + /** A retriever that replaces the functionality of a rule query. */ rule?: RuleRetriever } export type Routing = string export interface RrfRank { + /** How much influence documents in individual result sets per query have over the final ranked result set */ rank_constant?: long + /** Size of the individual result sets per query */ rank_window_size?: long } export interface RuleRetriever extends RetrieverBase { + /** The ruleset IDs containing the rules this retriever is evaluating against. */ ruleset_ids: Id[] + /** The match criteria that will determine if a rule in the provided rulesets should be applied. */ match_criteria: any + /** The retriever whose results rules should be applied to. */ retriever: RetrieverContainer + /** This value determines the size of the individual result set. 
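// Illustrative only, and dependent on a stack version that supports retrievers: combining a
// standard query retriever and a kNN retriever with reciprocal rank fusion, as modelled by
// RRFRetriever/RetrieverContainer above. All index, field and vector values are assumptions.
import type { Client } from '@elastic/elasticsearch'

async function hybridSearch (client: Client) {
  const resp = await client.search({
    index: 'products',
    retriever: {
      rrf: {
        retrievers: [
          { standard: { query: { match: { description: 'wireless headphones' } } } },
          { knn: { field: 'embedding', query_vector: [0.1, 0.2, 0.3], k: 50, num_candidates: 200 } }
        ],
        rank_constant: 60,
        rank_window_size: 100
      }
    }
  })
  console.log(resp.hits.hits)
}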
*/ rank_window_size?: integer } @@ -3337,15 +4386,20 @@ export interface ScoreSort { } export interface Script { - source?: string + /** The script source. */ + source?: ScriptSource + /** The `id` for a stored script. */ id?: Id + /** Specifies any named parameters that are passed into the script as variables. + * Use parameters instead of hard-coded values to decrease compile time. */ params?: Record + /** Specifies the language the script is written in. */ lang?: ScriptLanguage options?: Record } export interface ScriptField { - script: Script | string + script: Script | ScriptSource ignore_failure?: boolean } @@ -3353,7 +4407,7 @@ export type ScriptLanguage = 'painless' | 'expression' | 'mustache' | 'java' | s export interface ScriptSort { order?: SortOrder - script: Script | string + script: Script | ScriptSource type?: ScriptSortType mode?: SortMode nested?: NestedSortValue @@ -3361,10 +4415,12 @@ export interface ScriptSort { export type ScriptSortType = 'string' | 'number' | 'version' +export type ScriptSource = string | SearchSearchRequestBody + export interface ScriptTransform { lang?: string params?: Record - source?: string + source?: ScriptSource id?: string } @@ -3401,29 +4457,53 @@ export interface SearchTransform { export type SearchType = 'query_then_fetch' | 'dfs_query_then_fetch' export interface SegmentsStats { + /** Total number of segments across all shards assigned to selected nodes. */ count: integer + /** Total amount of memory used for doc values across all shards assigned to selected nodes. */ doc_values_memory?: ByteSize + /** Total amount, in bytes, of memory used for doc values across all shards assigned to selected nodes. */ doc_values_memory_in_bytes: long + /** This object is not populated by the cluster stats API. + * To get information on segment files, use the node stats API. */ file_sizes: Record + /** Total amount of memory used by fixed bit sets across all shards assigned to selected nodes. + * Fixed bit sets are used for nested object field types and type filters for join fields. */ fixed_bit_set?: ByteSize + /** Total amount of memory, in bytes, used by fixed bit sets across all shards assigned to selected nodes. */ fixed_bit_set_memory_in_bytes: long + /** Total amount of memory used by all index writers across all shards assigned to selected nodes. */ index_writer_memory?: ByteSize index_writer_max_memory_in_bytes?: long + /** Total amount, in bytes, of memory used by all index writers across all shards assigned to selected nodes. */ index_writer_memory_in_bytes: long + /** Unix timestamp, in milliseconds, of the most recently retried indexing request. */ max_unsafe_auto_id_timestamp: long + /** Total amount of memory used for segments across all shards assigned to selected nodes. */ memory?: ByteSize + /** Total amount, in bytes, of memory used for segments across all shards assigned to selected nodes. */ memory_in_bytes: long + /** Total amount of memory used for normalization factors across all shards assigned to selected nodes. */ norms_memory?: ByteSize + /** Total amount, in bytes, of memory used for normalization factors across all shards assigned to selected nodes. */ norms_memory_in_bytes: long + /** Total amount of memory used for points across all shards assigned to selected nodes. */ points_memory?: ByteSize + /** Total amount, in bytes, of memory used for points across all shards assigned to selected nodes. 
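// Illustrative only: with ScriptSource accepting a plain string, a script field can be given
// either as a full Script object or as the bare source string, as the ScriptField type above
// allows. Index, field and parameter names are assumptions, not part of this diff.
import type { Client } from '@elastic/elasticsearch'

async function discountedPrices (client: Client) {
  const resp = await client.search({
    index: 'products',
    script_fields: {
      discounted: {
        // Script object with a string source and parameters...
        script: {
          source: "doc['price'].value * params.factor",
          params: { factor: 0.9 }
        }
      },
      doubled: {
        // ...or, where no params are needed, the bare ScriptSource string
        script: "doc['price'].value * 2"
      }
    }
  })
  console.log(resp.hits.hits)
}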
*/ points_memory_in_bytes: long stored_memory?: ByteSize + /** Total amount, in bytes, of memory used for stored fields across all shards assigned to selected nodes. */ stored_fields_memory_in_bytes: long + /** Total amount, in bytes, of memory used for terms across all shards assigned to selected nodes. */ terms_memory_in_bytes: long + /** Total amount of memory used for terms across all shards assigned to selected nodes. */ terms_memory?: ByteSize + /** Total amount of memory used for term vectors across all shards assigned to selected nodes. */ term_vectory_memory?: ByteSize + /** Total amount, in bytes, of memory used for term vectors across all shards assigned to selected nodes. */ term_vectors_memory_in_bytes: long + /** Total amount of memory used by all version maps across all shards assigned to selected nodes. */ version_map_memory?: ByteSize + /** Total amount, in bytes, of memory used by all version maps across all shards assigned to selected nodes. */ version_map_memory_in_bytes: long } @@ -3440,8 +4520,11 @@ export interface ShardFailure { } export interface ShardStatistics { + /** The number of shards the operation or search attempted to run on but failed. */ failed: uint + /** The number of shards the operation or search succeeded on. */ successful: uint + /** The number of shards the operation or search will run on overall. */ total: uint failures?: ShardFailure[] skipped?: uint @@ -3481,26 +4564,43 @@ export type SortOrder = 'asc' | 'desc' export type SortResults = FieldValue[] export interface StandardRetriever extends RetrieverBase { + /** Defines a query to retrieve a set of top documents. */ query?: QueryDslQueryContainer + /** Defines a search after object parameter used for pagination. */ search_after?: SortResults + /** Maximum number of documents to collect for each shard. */ terminate_after?: integer + /** A sort object that that specifies the order of matching documents. */ sort?: Sort + /** Collapses the top documents by a specified key into a single top document per key. */ collapse?: SearchFieldCollapse } export interface StoreStats { + /** Total size of all shards assigned to selected nodes. */ size?: ByteSize + /** Total size, in bytes, of all shards assigned to selected nodes. */ size_in_bytes: long + /** A prediction of how much larger the shard stores will eventually grow due to ongoing peer recoveries, restoring snapshots, and similar activities. */ reserved?: ByteSize + /** A prediction, in bytes, of how much larger the shard stores will eventually grow due to ongoing peer recoveries, restoring snapshots, and similar activities. */ reserved_in_bytes: long + /** Total data set size of all shards assigned to selected nodes. + * This includes the size of shards not stored fully on the nodes, such as the cache for partially mounted indices. */ total_data_set_size?: ByteSize + /** Total data set size, in bytes, of all shards assigned to selected nodes. + * This includes the size of shards not stored fully on the nodes, such as the cache for partially mounted indices. */ total_data_set_size_in_bytes?: long } export interface StoredScript { + /** The language the script is written in. + * For search templates, use `mustache`. */ lang: ScriptLanguage options?: Record - source: string + /** The script source. + * For search templates, an object containing the search template. 
*/ + source: ScriptSource } export type StreamResult = ArrayBuffer @@ -3524,10 +4624,15 @@ export interface TextEmbedding { } export interface TextSimilarityReranker extends RetrieverBase { + /** The nested retriever which will produce the first-level results, that will later be used for reranking. */ retriever: RetrieverContainer + /** This value determines how many documents we will consider from the nested retriever. */ rank_window_size?: integer + /** Unique identifier of the inference endpoint created using the inference API. */ inference_id?: string + /** The text snippet used as the basis for similarity comparison */ inference_text?: string + /** The document field to be used for text similarity comparisons. This field should contain the text that will be evaluated against the inference_text */ field?: string } @@ -3603,12 +4708,20 @@ export interface WktGeoBounds { } export interface WriteResponseBase { + /** The unique identifier for the added document. */ _id: Id + /** The name of the index the document was added to. */ _index: IndexName + /** The primary term assigned to the document for the indexing operation. */ _primary_term?: long + /** The result of the indexing operation: `created` or `updated`. */ result: Result + /** The sequence number assigned to the document for the indexing operation. + * Sequence numbers are used to ensure an older version of a document doesn't overwrite a newer version. */ _seq_no?: SequenceNumber + /** Information about the replication process of the operation. */ _shards: ShardStatistics + /** The document version, which is incremented each time the document is updated. */ _version: VersionNumber forced_refresh?: boolean } @@ -3633,7 +4746,10 @@ export interface AggregationsAdjacencyMatrixAggregate extends AggregationsMultiB } export interface AggregationsAdjacencyMatrixAggregation extends AggregationsBucketAggregationBase { + /** Filters used to create buckets. + * At least one filter is required. */ filters?: Record + /** Separator used to concatenate filter names. Defaults to &. */ separator?: string } @@ -3655,92 +4771,199 @@ export interface AggregationsAggregation { } export interface AggregationsAggregationContainer { + /** Sub-aggregations for this aggregation. + * Only applies to bucket aggregations. */ aggregations?: Record + /** Sub-aggregations for this aggregation. + * Only applies to bucket aggregations. + * @alias aggregations */ aggs?: Record meta?: Metadata + /** A bucket aggregation returning a form of adjacency matrix. + * The request provides a collection of named filter expressions, similar to the `filters` aggregation. + * Each bucket in the response represents a non-empty cell in the matrix of intersecting filters. */ adjacency_matrix?: AggregationsAdjacencyMatrixAggregation + /** A multi-bucket aggregation similar to the date histogram, except instead of providing an interval to use as the width of each bucket, a target number of buckets is provided. */ auto_date_histogram?: AggregationsAutoDateHistogramAggregation + /** A single-value metrics aggregation that computes the average of numeric values that are extracted from the aggregated documents. */ avg?: AggregationsAverageAggregation + /** A sibling pipeline aggregation which calculates the mean value of a specified metric in a sibling aggregation. + * The specified metric must be numeric and the sibling aggregation must be a multi-bucket aggregation. 
*/ avg_bucket?: AggregationsAverageBucketAggregation + /** A metrics aggregation that computes a box plot of numeric values extracted from the aggregated documents. */ boxplot?: AggregationsBoxplotAggregation + /** A parent pipeline aggregation which runs a script which can perform per bucket computations on metrics in the parent multi-bucket aggregation. */ bucket_script?: AggregationsBucketScriptAggregation + /** A parent pipeline aggregation which runs a script to determine whether the current bucket will be retained in the parent multi-bucket aggregation. */ bucket_selector?: AggregationsBucketSelectorAggregation + /** A parent pipeline aggregation which sorts the buckets of its parent multi-bucket aggregation. */ bucket_sort?: AggregationsBucketSortAggregation + /** A sibling pipeline aggregation which runs a two sample Kolmogorov–Smirnov test ("K-S test") against a provided distribution and the distribution implied by the documents counts in the configured sibling aggregation. + * @experimental */ bucket_count_ks_test?: AggregationsBucketKsAggregation + /** A sibling pipeline aggregation which runs a correlation function on the configured sibling multi-bucket aggregation. + * @experimental */ bucket_correlation?: AggregationsBucketCorrelationAggregation + /** A single-value metrics aggregation that calculates an approximate count of distinct values. */ cardinality?: AggregationsCardinalityAggregation + /** A multi-bucket aggregation that groups semi-structured text into buckets. + * @experimental */ categorize_text?: AggregationsCategorizeTextAggregation + /** A single bucket aggregation that selects child documents that have the specified type, as defined in a `join` field. */ children?: AggregationsChildrenAggregation + /** A multi-bucket aggregation that creates composite buckets from different sources. + * Unlike the other multi-bucket aggregations, you can use the `composite` aggregation to paginate *all* buckets from a multi-level aggregation efficiently. */ composite?: AggregationsCompositeAggregation + /** A parent pipeline aggregation which calculates the cumulative cardinality in a parent `histogram` or `date_histogram` aggregation. */ cumulative_cardinality?: AggregationsCumulativeCardinalityAggregation + /** A parent pipeline aggregation which calculates the cumulative sum of a specified metric in a parent `histogram` or `date_histogram` aggregation. */ cumulative_sum?: AggregationsCumulativeSumAggregation + /** A multi-bucket values source based aggregation that can be applied on date values or date range values extracted from the documents. + * It dynamically builds fixed size (interval) buckets over the values. */ date_histogram?: AggregationsDateHistogramAggregation + /** A multi-bucket value source based aggregation that enables the user to define a set of date ranges - each representing a bucket. */ date_range?: AggregationsDateRangeAggregation + /** A parent pipeline aggregation which calculates the derivative of a specified metric in a parent `histogram` or `date_histogram` aggregation. */ derivative?: AggregationsDerivativeAggregation + /** A filtering aggregation used to limit any sub aggregations' processing to a sample of the top-scoring documents. + * Similar to the `sampler` aggregation, but adds the ability to limit the number of matches that share a common value. */ diversified_sampler?: AggregationsDiversifiedSamplerAggregation + /** A multi-value metrics aggregation that computes stats over numeric values extracted from the aggregated documents. 
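// Illustrative only: a small aggregation tree using a few of the container members documented
// above (a date_histogram with an avg sub-aggregation plus an avg_bucket sibling pipeline).
// Index and field names are assumptions, not part of this diff.
import type { Client } from '@elastic/elasticsearch'

async function dailyLatency (client: Client) {
  const resp = await client.search({
    index: 'metrics-*',
    size: 0,
    aggs: {
      per_day: {
        date_histogram: { field: '@timestamp', calendar_interval: 'day' },
        aggs: { avg_latency: { avg: { field: 'latency_ms' } } }
      },
      overall_daily_avg: {
        avg_bucket: { buckets_path: 'per_day>avg_latency' }
      }
    }
  })
  console.log(resp.aggregations)
}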
*/ extended_stats?: AggregationsExtendedStatsAggregation + /** A sibling pipeline aggregation which calculates a variety of stats across all buckets of a specified metric in a sibling aggregation. */ extended_stats_bucket?: AggregationsExtendedStatsBucketAggregation + /** A bucket aggregation which finds frequent item sets, a form of association rules mining that identifies items that often occur together. */ frequent_item_sets?: AggregationsFrequentItemSetsAggregation + /** A single bucket aggregation that narrows the set of documents to those that match a query. */ filter?: QueryDslQueryContainer + /** A multi-bucket aggregation where each bucket contains the documents that match a query. */ filters?: AggregationsFiltersAggregation + /** A metric aggregation that computes the geographic bounding box containing all values for a Geopoint or Geoshape field. */ geo_bounds?: AggregationsGeoBoundsAggregation + /** A metric aggregation that computes the weighted centroid from all coordinate values for geo fields. */ geo_centroid?: AggregationsGeoCentroidAggregation + /** A multi-bucket aggregation that works on `geo_point` fields. + * Evaluates the distance of each document value from an origin point and determines the buckets it belongs to, based on ranges defined in the request. */ geo_distance?: AggregationsGeoDistanceAggregation + /** A multi-bucket aggregation that groups `geo_point` and `geo_shape` values into buckets that represent a grid. + * Each cell is labeled using a geohash which is of user-definable precision. */ geohash_grid?: AggregationsGeoHashGridAggregation + /** Aggregates all `geo_point` values within a bucket into a `LineString` ordered by the chosen sort field. */ geo_line?: AggregationsGeoLineAggregation + /** A multi-bucket aggregation that groups `geo_point` and `geo_shape` values into buckets that represent a grid. + * Each cell corresponds to a map tile as used by many online map sites. */ geotile_grid?: AggregationsGeoTileGridAggregation + /** A multi-bucket aggregation that groups `geo_point` and `geo_shape` values into buckets that represent a grid. + * Each cell corresponds to an H3 cell index and is labeled using the H3Index representation. */ geohex_grid?: AggregationsGeohexGridAggregation + /** Defines a single bucket of all the documents within the search execution context. + * This context is defined by the indices and the document types you’re searching on, but is not influenced by the search query itself. */ global?: AggregationsGlobalAggregation + /** A multi-bucket values source based aggregation that can be applied on numeric values or numeric range values extracted from the documents. + * It dynamically builds fixed size (interval) buckets over the values. */ histogram?: AggregationsHistogramAggregation + /** A multi-bucket value source based aggregation that enables the user to define a set of IP ranges - each representing a bucket. */ ip_range?: AggregationsIpRangeAggregation + /** A bucket aggregation that groups documents based on the network or sub-network of an IP address. */ ip_prefix?: AggregationsIpPrefixAggregation + /** A parent pipeline aggregation which loads a pre-trained model and performs inference on the collated result fields from the parent bucket aggregation. */ inference?: AggregationsInferenceAggregation line?: AggregationsGeoLineAggregation + /** A numeric aggregation that computes the following statistics over a set of document fields: `count`, `mean`, `variance`, `skewness`, `kurtosis`, `covariance`, and `correlation`.
*/ matrix_stats?: AggregationsMatrixStatsAggregation + /** A single-value metrics aggregation that returns the maximum value among the numeric values extracted from the aggregated documents. */ max?: AggregationsMaxAggregation + /** A sibling pipeline aggregation which identifies the bucket(s) with the maximum value of a specified metric in a sibling aggregation and outputs both the value and the key(s) of the bucket(s). */ max_bucket?: AggregationsMaxBucketAggregation + /** A single-value aggregation that approximates the median absolute deviation of its search results. */ median_absolute_deviation?: AggregationsMedianAbsoluteDeviationAggregation + /** A single-value metrics aggregation that returns the minimum value among numeric values extracted from the aggregated documents. */ min?: AggregationsMinAggregation + /** A sibling pipeline aggregation which identifies the bucket(s) with the minimum value of a specified metric in a sibling aggregation and outputs both the value and the key(s) of the bucket(s). */ min_bucket?: AggregationsMinBucketAggregation + /** A field data based single bucket aggregation, that creates a bucket of all documents in the current document set context that are missing a field value (effectively, missing a field or having the configured NULL value set). */ missing?: AggregationsMissingAggregation moving_avg?: AggregationsMovingAverageAggregation + /** Given an ordered series of percentiles, "slides" a window across those percentiles and computes cumulative percentiles. */ moving_percentiles?: AggregationsMovingPercentilesAggregation + /** Given an ordered series of data, "slides" a window across the data and runs a custom script on each window of data. + * For convenience, a number of common functions are predefined such as `min`, `max`, and moving averages. */ moving_fn?: AggregationsMovingFunctionAggregation + /** A multi-bucket value source based aggregation where buckets are dynamically built - one per unique set of values. */ multi_terms?: AggregationsMultiTermsAggregation + /** A special single bucket aggregation that enables aggregating nested documents. */ nested?: AggregationsNestedAggregation + /** A parent pipeline aggregation which calculates the specific normalized/rescaled value for a specific bucket value. */ normalize?: AggregationsNormalizeAggregation + /** A special single bucket aggregation that selects parent documents that have the specified type, as defined in a `join` field. */ parent?: AggregationsParentAggregation + /** A multi-value metrics aggregation that calculates one or more percentile ranks over numeric values extracted from the aggregated documents. */ percentile_ranks?: AggregationsPercentileRanksAggregation + /** A multi-value metrics aggregation that calculates one or more percentiles over numeric values extracted from the aggregated documents. */ percentiles?: AggregationsPercentilesAggregation + /** A sibling pipeline aggregation which calculates percentiles across all bucket of a specified metric in a sibling aggregation. */ percentiles_bucket?: AggregationsPercentilesBucketAggregation + /** A multi-bucket value source based aggregation that enables the user to define a set of ranges - each representing a bucket. */ range?: AggregationsRangeAggregation + /** A multi-bucket value source based aggregation which finds "rare" terms—terms that are at the long-tail of the distribution and are not frequent. */ rare_terms?: AggregationsRareTermsAggregation + /** Calculates a rate of documents or a field in each bucket. 
+ * Can only be used inside a `date_histogram` or `composite` aggregation. */ rate?: AggregationsRateAggregation + /** A special single bucket aggregation that enables aggregating on parent documents from nested documents. + * Should only be defined inside a `nested` aggregation. */ reverse_nested?: AggregationsReverseNestedAggregation + /** A single bucket aggregation that randomly includes documents in the aggregated results. + * Sampling provides significant speed improvement at the cost of accuracy. + * @remarks This property is not supported on Elastic Cloud Serverless. + * @experimental */ random_sampler?: AggregationsRandomSamplerAggregation + /** A filtering aggregation used to limit any sub aggregations' processing to a sample of the top-scoring documents. */ sampler?: AggregationsSamplerAggregation + /** A metric aggregation that uses scripts to provide a metric output. */ scripted_metric?: AggregationsScriptedMetricAggregation + /** An aggregation that subtracts values in a time series from themselves at different time lags or periods. */ serial_diff?: AggregationsSerialDifferencingAggregation + /** Returns interesting or unusual occurrences of terms in a set. */ significant_terms?: AggregationsSignificantTermsAggregation + /** Returns interesting or unusual occurrences of free-text terms in a set. */ significant_text?: AggregationsSignificantTextAggregation + /** A multi-value metrics aggregation that computes stats over numeric values extracted from the aggregated documents. */ stats?: AggregationsStatsAggregation + /** A sibling pipeline aggregation which calculates a variety of stats across all bucket of a specified metric in a sibling aggregation. */ stats_bucket?: AggregationsStatsBucketAggregation + /** A multi-value metrics aggregation that computes statistics over string values extracted from the aggregated documents. */ string_stats?: AggregationsStringStatsAggregation + /** A single-value metrics aggregation that sums numeric values that are extracted from the aggregated documents. */ sum?: AggregationsSumAggregation + /** A sibling pipeline aggregation which calculates the sum of a specified metric across all buckets in a sibling aggregation. */ sum_bucket?: AggregationsSumBucketAggregation + /** A multi-bucket value source based aggregation where buckets are dynamically built - one per unique value. */ terms?: AggregationsTermsAggregation + /** The time series aggregation queries data created using a time series index. + * This is typically data such as metrics or other data streams with a time component, and requires creating an index using the time series mode. + * @experimental */ time_series?: AggregationsTimeSeriesAggregation + /** A metric aggregation that returns the top matching documents per bucket. */ top_hits?: AggregationsTopHitsAggregation + /** A metrics aggregation that performs a statistical hypothesis test in which the test statistic follows a Student’s t-distribution under the null hypothesis on numeric values extracted from the aggregated documents. */ t_test?: AggregationsTTestAggregation + /** A metric aggregation that selects metrics from the document with the largest or smallest sort value. */ top_metrics?: AggregationsTopMetricsAggregation + /** A single-value metrics aggregation that counts the number of values that are extracted from the aggregated documents. 
*/ value_count?: AggregationsValueCountAggregation + /** A single-value metrics aggregation that computes the weighted average of numeric values that are extracted from the aggregated documents. */ weighted_avg?: AggregationsWeightedAverageAggregation + /** A multi-bucket aggregation similar to the histogram, except instead of providing an interval to use as the width of each bucket, a target number of buckets is provided. */ variable_width_histogram?: AggregationsVariableWidthHistogramAggregation } export interface AggregationsAggregationRange { + /** Start of the range (inclusive). */ from?: double | null + /** Custom key to return the range with. */ key?: string + /** End of the range (exclusive). */ to?: double | null } @@ -3755,14 +4978,24 @@ export interface AggregationsAutoDateHistogramAggregate extends AggregationsMult } export interface AggregationsAutoDateHistogramAggregation extends AggregationsBucketAggregationBase { + /** The target number of buckets. */ buckets?: integer + /** The field on which to run the aggregation. */ field?: Field + /** The date format used to format `key_as_string` in the response. + * If no `format` is specified, the first date format specified in the field mapping is used. */ format?: string + /** The minimum rounding interval. + * This can make the collection process more efficient, as the aggregation will not attempt to round at any interval lower than `minimum_interval`. */ minimum_interval?: AggregationsMinimumInterval + /** The value to apply to documents that do not have a value. + * By default, documents without a value are ignored. */ missing?: DateTime + /** Time zone specified as a ISO 8601 UTC offset. */ offset?: string params?: Record - script?: Script | string + script?: Script | ScriptSource + /** Time zone ID. */ time_zone?: TimeZone } @@ -3793,6 +5026,7 @@ export interface AggregationsBoxPlotAggregate extends AggregationsAggregateBase } export interface AggregationsBoxplotAggregation extends AggregationsMetricAggregationBase { + /** Limits the maximum number of nodes used by the underlying TDigest algorithm to `20 * compression`, enabling control of memory usage and approximation error. */ compression?: double } @@ -3800,26 +5034,49 @@ export interface AggregationsBucketAggregationBase { } export interface AggregationsBucketCorrelationAggregation extends AggregationsBucketPathAggregation { + /** The correlation function to execute. */ function: AggregationsBucketCorrelationFunction } export interface AggregationsBucketCorrelationFunction { + /** The configuration to calculate a count correlation. This function is designed for determining the correlation of a term value and a given metric. */ count_correlation: AggregationsBucketCorrelationFunctionCountCorrelation } export interface AggregationsBucketCorrelationFunctionCountCorrelation { + /** The indicator with which to correlate the configured `bucket_path` values. */ indicator: AggregationsBucketCorrelationFunctionCountCorrelationIndicator } export interface AggregationsBucketCorrelationFunctionCountCorrelationIndicator { + /** The total number of documents that initially created the expectations. It’s required to be greater + * than or equal to the sum of all values in the buckets_path as this is the originating superset of data + * to which the term values are correlated. */ doc_count: integer + /** An array of numbers with which to correlate the configured `bucket_path` values. + * The length of this value must always equal the number of buckets returned by the `bucket_path`. 
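A minimal sketch of how the `AggregationsAggregationContainer` shape completed above is used from the JavaScript client, with a bucket aggregation nesting a metric sub-aggregation under `aggregations` (or its `aggs` alias); the index and field names are placeholders:

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' }) // placeholder node

const res = await client.search({
  index: 'orders',                              // placeholder index
  size: 0,
  aggregations: {
    by_category: {
      terms: { field: 'category.keyword' },     // bucket aggregation
      aggregations: {                           // sub-aggregations only apply to bucket aggregations
        avg_price: { avg: { field: 'price' } }  // metric sub-aggregation
      }
    }
  }
})

console.log(res.aggregations)
```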
*/ expectations: double[] + /** An array of fractions to use when averaging and calculating variance. This should be used if + * the pre-calculated data and the buckets_path have known gaps. The length of fractions, if provided, + * must equal expectations. */ fractions?: double[] } export interface AggregationsBucketKsAggregation extends AggregationsBucketPathAggregation { + /** A list of string values indicating which K-S test alternative to calculate. The valid values + * are: "greater", "less", "two_sided". This parameter is key for determining the K-S statistic used + * when calculating the K-S test. Default value is all possible alternative hypotheses. */ alternative?: string[] + /** A list of doubles indicating the distribution of the samples with which to compare to the `buckets_path` results. + * In typical usage this is the overall proportion of documents in each bucket, which is compared with the actual + * document proportions in each bucket from the sibling aggregation counts. The default is to assume that overall + * documents are uniformly distributed on these buckets, which they would be if one used equal percentiles of a + * metric to define the bucket end points. */ fractions?: double[] + /** Indicates the sampling methodology when calculating the K-S test. Note, this is sampling of the returned values. + * This determines the cumulative distribution function (CDF) points used comparing the two samples. Default is + * `upper_tail`, which emphasizes the upper end of the CDF points. Valid options are: `upper_tail`, `uniform`, + * and `lower_tail`. */ sampling_method?: string } @@ -3828,21 +5085,29 @@ export interface AggregationsBucketMetricValueAggregate extends AggregationsSing } export interface AggregationsBucketPathAggregation { + /** Path to the buckets that contain one set of values to correlate. */ buckets_path?: AggregationsBucketsPath } export interface AggregationsBucketScriptAggregation extends AggregationsPipelineAggregationBase { - script?: Script | string + /** The script to run for this aggregation. */ + script?: Script | ScriptSource } export interface AggregationsBucketSelectorAggregation extends AggregationsPipelineAggregationBase { - script?: Script | string + /** The script to run for this aggregation. */ + script?: Script | ScriptSource } export interface AggregationsBucketSortAggregation { + /** Buckets in positions prior to `from` will be truncated. */ from?: integer + /** The policy to apply when gaps are found in the data. */ gap_policy?: AggregationsGapPolicy + /** The number of buckets to return. + * Defaults to all buckets of the parent aggregation. */ size?: integer + /** The list of fields to sort on. */ sort?: Sort } @@ -3857,30 +5122,58 @@ export interface AggregationsCardinalityAggregate extends AggregationsAggregateB } export interface AggregationsCardinalityAggregation extends AggregationsMetricAggregationBase { + /** A unique count below which counts are expected to be close to accurate. + * This allows to trade memory for accuracy. */ precision_threshold?: integer rehash?: boolean + /** Mechanism by which cardinality aggregations is run. */ execution_hint?: AggregationsCardinalityExecutionMode } export type AggregationsCardinalityExecutionMode = 'global_ordinals' | 'segment_ordinals' | 'direct' | 'save_memory_heuristic' | 'save_time_heuristic' export interface AggregationsCategorizeTextAggregation { + /** The semi-structured text field to categorize. 
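A hedged sketch of the `bucket_script`/`bucket_selector` pipeline aggregations documented above; with the `Script | ScriptSource` change a plain Painless source string can be passed directly. The index and field names are placeholders:

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' }) // placeholder node

await client.search({
  index: 'sales',                                          // placeholder index
  size: 0,
  aggregations: {
    per_day: {
      date_histogram: { field: '@timestamp', calendar_interval: 'day' },
      aggregations: {
        total: { sum: { field: 'price' } },
        // keep only the days whose total exceeds 1000
        busy_days: {
          bucket_selector: {
            buckets_path: { total: 'total' },
            script: 'params.total > 1000'                  // plain source string (ScriptSource)
          }
        }
      }
    }
  }
})
```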
*/ field: Field + /** The maximum number of unique tokens at any position up to max_matched_tokens. Must be larger than 1. + * Smaller values use less memory and create fewer categories. Larger values will use more memory and + * create narrower categories. Max allowed value is 100. */ max_unique_tokens?: integer + /** The maximum number of token positions to match on before attempting to merge categories. Larger + * values will use more memory and create narrower categories. Max allowed value is 100. */ max_matched_tokens?: integer + /** The minimum percentage of tokens that must match for text to be added to the category bucket. Must + * be between 1 and 100. The larger the value the narrower the categories. Larger values will increase memory + * usage and create narrower categories. */ similarity_threshold?: integer + /** This property expects an array of regular expressions. The expressions are used to filter out matching + * sequences from the categorization field values. You can use this functionality to fine tune the categorization + * by excluding sequences from consideration when categories are defined. For example, you can exclude SQL + * statements that appear in your log files. This property cannot be used at the same time as categorization_analyzer. + * If you only want to define simple regular expression filters that are applied prior to tokenization, setting + * this property is the easiest method. If you also want to customize the tokenizer or post-tokenization filtering, + * use the categorization_analyzer property instead and include the filters as pattern_replace character filters. */ categorization_filters?: string[] + /** The categorization analyzer specifies how the text is analyzed and tokenized before being categorized. + * The syntax is very similar to that used to define the analyzer in the analyze API. This property + * cannot be used at the same time as `categorization_filters`. */ categorization_analyzer?: AggregationsCategorizeTextAnalyzer + /** The number of categorization buckets to return from each shard before merging all the results. */ shard_size?: integer + /** The number of buckets to return. */ size?: integer + /** The minimum number of documents in a bucket to be returned to the results. */ min_doc_count?: integer + /** The minimum number of documents in a bucket to be returned from the shard before merging. */ shard_min_doc_count?: integer } export type AggregationsCategorizeTextAnalyzer = string | AggregationsCustomCategorizeTextAnalyzer export interface AggregationsChiSquareHeuristic { + /** Set to `false` if you defined a custom background filter that represents a different set of documents that you want to compare to. */ background_is_superset: boolean + /** Set to `false` to filter out the terms that appear less often in the subset than in documents outside the subset. */ include_negatives: boolean } @@ -3890,6 +5183,7 @@ export type AggregationsChildrenAggregate = AggregationsChildrenAggregateKeys & { [property: string]: AggregationsAggregate | long | Metadata } export interface AggregationsChildrenAggregation extends AggregationsBucketAggregationBase { + /** The child type that should be selected. 
*/ type?: RelationName } @@ -3900,24 +5194,34 @@ export interface AggregationsCompositeAggregate extends AggregationsMultiBucketA export type AggregationsCompositeAggregateKey = Record export interface AggregationsCompositeAggregation extends AggregationsBucketAggregationBase { + /** When paginating, use the `after_key` value returned in the previous response to retrieve the next page. */ after?: AggregationsCompositeAggregateKey + /** The number of composite buckets that should be returned. */ size?: integer + /** The value sources used to build composite buckets. + * Keys are returned in the order of the `sources` definition. */ sources?: Record[] } export interface AggregationsCompositeAggregationBase { + /** Either `field` or `script` must be present */ field?: Field missing_bucket?: boolean missing_order?: AggregationsMissingOrder - script?: Script | string + /** Either `field` or `script` must be present */ + script?: Script | ScriptSource value_type?: AggregationsValueType order?: SortOrder } export interface AggregationsCompositeAggregationSource { + /** A terms aggregation. */ terms?: AggregationsCompositeTermsAggregation + /** A histogram aggregation. */ histogram?: AggregationsCompositeHistogramAggregation + /** A date histogram aggregation. */ date_histogram?: AggregationsCompositeDateHistogramAggregation + /** A geotile grid aggregation. */ geotile_grid?: AggregationsCompositeGeoTileGridAggregation } @@ -3929,7 +5233,9 @@ export type AggregationsCompositeBucket = AggregationsCompositeBucketKeys export interface AggregationsCompositeDateHistogramAggregation extends AggregationsCompositeAggregationBase { format?: string + /** Either `calendar_interval` or `fixed_interval` must be present */ calendar_interval?: DurationLarge + /** Either `calendar_interval` or `fixed_interval` must be present */ fixed_interval?: DurationLarge offset?: Duration time_zone?: TimeZone @@ -3968,20 +5274,37 @@ export interface AggregationsDateHistogramAggregate extends AggregationsMultiBuc } export interface AggregationsDateHistogramAggregation extends AggregationsBucketAggregationBase { + /** Calendar-aware interval. + * Can be specified using the unit name, such as `month`, or as a single unit quantity, such as `1M`. */ calendar_interval?: AggregationsCalendarInterval + /** Enables extending the bounds of the histogram beyond the data itself. */ extended_bounds?: AggregationsExtendedBounds + /** Limits the histogram to specified bounds. */ hard_bounds?: AggregationsExtendedBounds + /** The date field whose values are use to build a histogram. */ field?: Field + /** Fixed intervals: a fixed number of SI units and never deviate, regardless of where they fall on the calendar. */ fixed_interval?: Duration + /** The date format used to format `key_as_string` in the response. + * If no `format` is specified, the first date format specified in the field mapping is used. */ format?: string interval?: Duration + /** Only returns buckets that have `min_doc_count` number of documents. + * By default, all buckets between the first bucket that matches documents and the last one are returned. */ min_doc_count?: integer + /** The value to apply to documents that do not have a value. + * By default, documents without a value are ignored. */ missing?: DateTime + /** Changes the start value of each bucket by the specified positive (`+`) or negative offset (`-`) duration. */ offset?: Duration + /** The sort order of the returned buckets. 
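The `after`/`sources` fields of the composite aggregation above are typically used to page through all buckets. A sketch under assumed placeholder index and field names; the cast simply keeps the example short:

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' }) // placeholder node

type CompositeKey = Record<string, string | number | null>
let after: CompositeKey | undefined

do {
  const res = await client.search({
    index: 'logs',                                           // placeholder index
    size: 0,
    aggregations: {
      pages: {
        composite: {
          size: 1000,
          sources: [{ url: { terms: { field: 'url.keyword' } } }],
          ...(after !== undefined ? { after } : {})          // resume from the previous page
        }
      }
    }
  })

  const pages: { after_key?: CompositeKey, buckets: unknown[] } = res.aggregations?.pages as any
  // ...process pages.buckets...
  after = pages.buckets.length > 0 ? pages.after_key : undefined
} while (after !== undefined)
```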
*/ order?: AggregationsAggregateOrder params?: Record - script?: Script | string + script?: Script | ScriptSource + /** Time zone used for bucketing and rounding. + * Defaults to Coordinated Universal Time (UTC). */ time_zone?: TimeZone + /** Set to `true` to associate a unique string key with each bucket and return the ranges as a hash rather than an array. */ keyed?: boolean } @@ -3996,17 +5319,27 @@ export interface AggregationsDateRangeAggregate extends AggregationsRangeAggrega } export interface AggregationsDateRangeAggregation extends AggregationsBucketAggregationBase { + /** The date field whose values are use to build ranges. */ field?: Field + /** The date format used to format `from` and `to` in the response. */ format?: string + /** The value to apply to documents that do not have a value. + * By default, documents without a value are ignored. */ missing?: AggregationsMissing + /** Array of date ranges. */ ranges?: AggregationsDateRangeExpression[] + /** Time zone used to convert dates from another time zone to UTC. */ time_zone?: TimeZone + /** Set to `true` to associate a unique string key with each bucket and returns the ranges as a hash rather than an array. */ keyed?: boolean } export interface AggregationsDateRangeExpression { + /** Start of the range (inclusive). */ from?: AggregationsFieldDateMath + /** Custom key to return the range with. */ key?: string + /** End of the range (exclusive). */ to?: AggregationsFieldDateMath } @@ -4019,10 +5352,14 @@ export interface AggregationsDerivativeAggregation extends AggregationsPipelineA } export interface AggregationsDiversifiedSamplerAggregation extends AggregationsBucketAggregationBase { + /** The type of value used for de-duplication. */ execution_hint?: AggregationsSamplerAggregationExecutionHint + /** Limits how many documents are permitted per choice of de-duplicating value. */ max_docs_per_value?: integer - script?: Script | string + script?: Script | ScriptSource + /** Limits how many top-scoring documents are collected in the sample processed on each shard. */ shard_size?: integer + /** The field used to provide values used for de-duplication. */ field?: Field } @@ -4046,7 +5383,9 @@ export interface AggregationsEwmaMovingAverageAggregation extends AggregationsMo } export interface AggregationsExtendedBounds { + /** Maximum value for the bound. */ max?: T + /** Minimum value for the bound. */ min?: T } @@ -4068,6 +5407,7 @@ export interface AggregationsExtendedStatsAggregate extends AggregationsStatsAgg } export interface AggregationsExtendedStatsAggregation extends AggregationsFormatMetricAggregationBase { + /** The number of standard deviations above/below the mean to display. */ sigma?: double } @@ -4075,6 +5415,7 @@ export interface AggregationsExtendedStatsBucketAggregate extends AggregationsEx } export interface AggregationsExtendedStatsBucketAggregation extends AggregationsPipelineAggregationBase { + /** The number of standard deviations above/below the mean to display. */ sigma?: double } @@ -4089,9 +5430,14 @@ export interface AggregationsFiltersAggregate extends AggregationsMultiBucketAgg } export interface AggregationsFiltersAggregation extends AggregationsBucketAggregationBase { + /** Collection of queries from which to build buckets. */ filters?: AggregationsBuckets + /** Set to `true` to add a bucket to the response which will contain all documents that do not match any of the given filters. */ other_bucket?: boolean + /** The key with which the other bucket is returned. 
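A short sketch of the `date_histogram` options being documented in this hunk (`fixed_interval`, `min_doc_count`, `extended_bounds`, `time_zone`); the index and field names are placeholders:

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' }) // placeholder node

await client.search({
  index: 'metrics',                                          // placeholder index
  size: 0,
  aggregations: {
    per_hour: {
      date_histogram: {
        field: '@timestamp',
        fixed_interval: '1h',                                // fixed-size buckets; use calendar_interval for calendar units
        min_doc_count: 0,
        extended_bounds: { min: 'now-1d/d', max: 'now/d' },  // force empty buckets across the whole day
        time_zone: 'UTC'
      }
    }
  }
})
```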
*/ other_bucket_key?: string + /** By default, the named filters aggregation returns the buckets as an object. + * Set to `false` to return the buckets as an array of objects. */ keyed?: boolean } @@ -4112,10 +5458,15 @@ export interface AggregationsFrequentItemSetsAggregate extends AggregationsMulti } export interface AggregationsFrequentItemSetsAggregation { + /** Fields to analyze. */ fields: AggregationsFrequentItemSetsField[] + /** The minimum size of one item set. */ minimum_set_size?: integer + /** The minimum support of one item set. */ minimum_support?: double + /** The number of top item sets to return. */ size?: integer + /** Query that filters documents from analysis. */ filter?: QueryDslQueryContainer } @@ -4128,7 +5479,11 @@ export type AggregationsFrequentItemSetsBucket = AggregationsFrequentItemSetsBuc export interface AggregationsFrequentItemSetsField { field: Field + /** Values to exclude. + * Can be regular expression strings or arrays of strings of exact terms. */ exclude?: AggregationsTermsExclude + /** Values to include. + * Can be regular expression strings or arrays of strings of exact terms. */ include?: AggregationsTermsInclude } @@ -4139,6 +5494,7 @@ export interface AggregationsGeoBoundsAggregate extends AggregationsAggregateBas } export interface AggregationsGeoBoundsAggregation extends AggregationsMetricAggregationBase { + /** Specifies whether the bounding box should be allowed to overlap the international date line. */ wrap_longitude?: boolean } @@ -4156,10 +5512,15 @@ export interface AggregationsGeoDistanceAggregate extends AggregationsRangeAggre } export interface AggregationsGeoDistanceAggregation extends AggregationsBucketAggregationBase { + /** The distance calculation type. */ distance_type?: GeoDistanceType + /** A field of type `geo_point` used to evaluate the distance. */ field?: Field + /** The origin used to evaluate the distance. */ origin?: GeoLocation + /** An array of ranges used to bucket documents. */ ranges?: AggregationsAggregationRange[] + /** The distance unit. */ unit?: DistanceUnit } @@ -4167,10 +5528,17 @@ export interface AggregationsGeoHashGridAggregate extends AggregationsMultiBucke } export interface AggregationsGeoHashGridAggregation extends AggregationsBucketAggregationBase { + /** The bounding box to filter the points in each bucket. */ bounds?: GeoBounds + /** Field containing indexed `geo_point` or `geo_shape` values. + * If the field contains an array, `geohash_grid` aggregates all array values. */ field?: Field + /** The string length of the geohashes used to define cells/buckets in the results. */ precision?: GeoHashPrecision + /** Allows for more accurate counting of the top cells returned in the final result the aggregation. + * Defaults to returning `max(10,(size x number-of-shards))` buckets from each shard. */ shard_size?: integer + /** The maximum number of geohash buckets to return. */ size?: integer } @@ -4196,18 +5564,27 @@ export interface AggregationsGeoLineAggregate extends AggregationsAggregateBase } export interface AggregationsGeoLineAggregation { + /** The name of the geo_point field. */ point: AggregationsGeoLinePoint + /** The name of the numeric field to use as the sort key for ordering the points. + * When the `geo_line` aggregation is nested inside a `time_series` aggregation, this field defaults to `@timestamp`, and any other value will result in error. */ sort: AggregationsGeoLineSort + /** When `true`, returns an additional array of the sort values in the feature properties. 
*/ include_sort?: boolean + /** The order in which the line is sorted (ascending or descending). */ sort_order?: SortOrder + /** The maximum length of the line represented in the aggregation. + * Valid sizes are between 1 and 10000. */ size?: integer } export interface AggregationsGeoLinePoint { + /** The name of the geo_point field. */ field: Field } export interface AggregationsGeoLineSort { + /** The name of the numeric field to use as the sort key for ordering the points. */ field: Field } @@ -4215,10 +5592,18 @@ export interface AggregationsGeoTileGridAggregate extends AggregationsMultiBucke } export interface AggregationsGeoTileGridAggregation extends AggregationsBucketAggregationBase { + /** Field containing indexed `geo_point` or `geo_shape` values. + * If the field contains an array, `geotile_grid` aggregates all array values. */ field?: Field + /** Integer zoom of the key used to define cells/buckets in the results. + * Values outside of the range [0,29] will be rejected. */ precision?: GeoTilePrecision + /** Allows for more accurate counting of the top cells returned in the final result the aggregation. + * Defaults to returning `max(10,(size x number-of-shards))` buckets from each shard. */ shard_size?: integer + /** The maximum number of buckets to return. */ size?: integer + /** A bounding box to filter the geo-points or geo-shapes in each bucket. */ bounds?: GeoBounds } @@ -4229,10 +5614,17 @@ export type AggregationsGeoTileGridBucket = AggregationsGeoTileGridBucketKeys & { [property: string]: AggregationsAggregate | GeoTile | long } export interface AggregationsGeohexGridAggregation extends AggregationsBucketAggregationBase { + /** Field containing indexed `geo_point` or `geo_shape` values. + * If the field contains an array, `geohex_grid` aggregates all array values. */ field: Field + /** Integer zoom of the key used to defined cells or buckets + * in the results. Value should be between 0-15. */ precision?: integer + /** Bounding box used to filter the geo-points in each bucket. */ bounds?: GeoBounds + /** Maximum number of buckets to return. */ size?: integer + /** Number of buckets returned from each shard. */ shard_size?: integer } @@ -4245,10 +5637,12 @@ export interface AggregationsGlobalAggregation extends AggregationsBucketAggrega } export interface AggregationsGoogleNormalizedDistanceHeuristic { + /** Set to `false` if you defined a custom background filter that represents a different set of documents that you want to compare to. */ background_is_superset?: boolean } export interface AggregationsHdrMethod { + /** Specifies the resolution of values for the histogram in number of significant digits. */ number_of_significant_value_digits?: integer } @@ -4262,16 +5656,31 @@ export interface AggregationsHistogramAggregate extends AggregationsMultiBucketA } export interface AggregationsHistogramAggregation extends AggregationsBucketAggregationBase { + /** Enables extending the bounds of the histogram beyond the data itself. */ extended_bounds?: AggregationsExtendedBounds + /** Limits the range of buckets in the histogram. + * It is particularly useful in the case of open data ranges that can result in a very large number of buckets. */ hard_bounds?: AggregationsExtendedBounds + /** The name of the field to aggregate on. */ field?: Field + /** The interval for the buckets. + * Must be a positive decimal. */ interval?: double + /** Only returns buckets that have `min_doc_count` number of documents. 
+ * By default, the response will fill gaps in the histogram with empty buckets. */ min_doc_count?: integer + /** The value to apply to documents that do not have a value. + * By default, documents without a value are ignored. */ missing?: double + /** By default, the bucket keys start with 0 and then continue in even spaced steps of `interval`. + * The bucket boundaries can be shifted by using the `offset` option. */ offset?: double + /** The sort order of the returned buckets. + * By default, the returned buckets are sorted by their key ascending. */ order?: AggregationsAggregateOrder - script?: Script | string + script?: Script | ScriptSource format?: string + /** If `true`, returns buckets as a hash instead of an array, keyed by the bucket keys. */ keyed?: boolean } @@ -4318,7 +5727,9 @@ export type AggregationsInferenceAggregate = AggregationsInferenceAggregateKeys & { [property: string]: any } export interface AggregationsInferenceAggregation extends AggregationsPipelineAggregationBase { + /** The ID or alias for the trained model. */ model_id: Name + /** Contains the inference type and its options. */ inference_config?: AggregationsInferenceConfigContainer } @@ -4328,7 +5739,9 @@ export interface AggregationsInferenceClassImportance { } export interface AggregationsInferenceConfigContainer { + /** Regression configuration for inference. */ regression?: MlRegressionInferenceOptions + /** Classification configuration for inference. */ classification?: MlClassificationInferenceOptions } @@ -4348,11 +5761,18 @@ export interface AggregationsIpPrefixAggregate extends AggregationsMultiBucketAg } export interface AggregationsIpPrefixAggregation extends AggregationsBucketAggregationBase { + /** The IP address field to aggregate on. The field mapping type must be `ip`. */ field: Field + /** Length of the network prefix. For IPv4 addresses the accepted range is [0, 32]. + * For IPv6 addresses the accepted range is [0, 128]. */ prefix_length: integer + /** Defines whether the prefix applies to IPv6 addresses. */ is_ipv6?: boolean + /** Defines whether the prefix length is appended to IP address keys in the response. */ append_prefix_length?: boolean + /** Defines whether buckets are returned as a hash rather than an array in the response. */ keyed?: boolean + /** Minimum number of documents in a bucket for it to be included in the response. */ min_doc_count?: long } @@ -4369,13 +5789,18 @@ export interface AggregationsIpRangeAggregate extends AggregationsMultiBucketAgg } export interface AggregationsIpRangeAggregation extends AggregationsBucketAggregationBase { + /** The IP field whose values are used to build ranges. */ field?: Field + /** Array of IP ranges. */ ranges?: AggregationsIpRangeAggregationRange[] } export interface AggregationsIpRangeAggregationRange { + /** Start of the range. */ from?: string | null + /** IP range defined as a CIDR mask. */ mask?: string + /** End of the range. */ to?: string | null } @@ -4415,7 +5840,10 @@ export type AggregationsLongTermsBucket = AggregationsLongTermsBucketKeys & { [property: string]: AggregationsAggregate | long | string } export interface AggregationsMatrixAggregation { + /** An array of fields for computing the statistics. */ fields?: Fields + /** The value to apply to documents that do not have a value. + * By default, documents without a value are ignored.
*/ missing?: Record } @@ -4425,6 +5853,7 @@ export interface AggregationsMatrixStatsAggregate extends AggregationsAggregateB } export interface AggregationsMatrixStatsAggregation extends AggregationsMatrixAggregation { + /** Array value the aggregation will use for array or multi-valued fields. */ mode?: SortMode } @@ -4452,13 +5881,17 @@ export interface AggregationsMedianAbsoluteDeviationAggregate extends Aggregatio } export interface AggregationsMedianAbsoluteDeviationAggregation extends AggregationsFormatMetricAggregationBase { + /** Limits the maximum number of nodes used by the underlying TDigest algorithm to `20 * compression`, enabling control of memory usage and approximation error. */ compression?: double } export interface AggregationsMetricAggregationBase { + /** The field on which to run the aggregation. */ field?: Field + /** The value to apply to documents that do not have a value. + * By default, documents without a value are ignored. */ missing?: AggregationsMissing - script?: Script | string + script?: Script | ScriptSource } export interface AggregationsMinAggregate extends AggregationsSingleMetricAggregateBase { @@ -4480,6 +5913,7 @@ export type AggregationsMissingAggregate = AggregationsMissingAggregateKeys & { [property: string]: AggregationsAggregate | long | Metadata } export interface AggregationsMissingAggregation extends AggregationsBucketAggregationBase { + /** The name of the field. */ field?: Field missing?: AggregationsMissing } @@ -4495,13 +5929,20 @@ export interface AggregationsMovingAverageAggregationBase extends AggregationsPi } export interface AggregationsMovingFunctionAggregation extends AggregationsPipelineAggregationBase { + /** The script that should be executed on each window of data. */ script?: string + /** By default, the window consists of the last n values excluding the current bucket. + * Increasing `shift` by 1, moves the starting window position by 1 to the right. */ shift?: integer + /** The size of window to "slide" across the histogram. */ window?: integer } export interface AggregationsMovingPercentilesAggregation extends AggregationsPipelineAggregationBase { + /** The size of window to "slide" across the histogram. */ window?: integer + /** By default, the window consists of the last n values excluding the current bucket. + * Increasing `shift` by 1, moves the starting window position by 1 to the right. */ shift?: integer keyed?: boolean } @@ -4515,7 +5956,10 @@ export interface AggregationsMultiBucketBase { } export interface AggregationsMultiTermLookup { + /** A fields from which to retrieve terms. */ field: Field + /** The value to apply to documents that do not have a value. + * By default, documents without a value are ignored. */ missing?: AggregationsMissing } @@ -4523,13 +5967,23 @@ export interface AggregationsMultiTermsAggregate extends AggregationsTermsAggreg } export interface AggregationsMultiTermsAggregation extends AggregationsBucketAggregationBase { + /** Specifies the strategy for data collection. */ collect_mode?: AggregationsTermsAggregationCollectMode + /** Specifies the sort order of the buckets. + * Defaults to sorting by descending document count. */ order?: AggregationsAggregateOrder + /** The minimum number of documents in a bucket for it to be returned. */ min_doc_count?: long + /** The minimum number of documents in a bucket on each shard for it to be returned. */ shard_min_doc_count?: long + /** The number of candidate terms produced by each shard. 
+ * By default, `shard_size` will be automatically estimated based on the number of shards and the `size` parameter. */ shard_size?: integer + /** Calculates the doc count error on per term basis. */ show_term_doc_count_error?: boolean + /** The number of term buckets should be returned out of the overall terms list. */ size?: integer + /** The field from which to generate sets of terms. */ terms: AggregationsMultiTermLookup[] } @@ -4542,7 +5996,9 @@ export type AggregationsMultiTermsBucket = AggregationsMultiTermsBucketKeys & { [property: string]: AggregationsAggregate | FieldValue[] | string | long } export interface AggregationsMutualInformationHeuristic { + /** Set to `false` if you defined a custom background filter that represents a different set of documents that you want to compare to. */ background_is_superset?: boolean + /** Set to `false` to filter out the terms that appear less often in the subset than in documents outside the subset. */ include_negatives?: boolean } @@ -4552,10 +6008,12 @@ export type AggregationsNestedAggregate = AggregationsNestedAggregateKeys & { [property: string]: AggregationsAggregate | long | Metadata } export interface AggregationsNestedAggregation extends AggregationsBucketAggregationBase { + /** The path to the field of type `nested`. */ path?: Field } export interface AggregationsNormalizeAggregation extends AggregationsPipelineAggregationBase { + /** The specific method to apply. */ method?: AggregationsNormalizeMethod } @@ -4567,6 +6025,7 @@ export type AggregationsParentAggregate = AggregationsParentAggregateKeys & { [property: string]: AggregationsAggregate | long | Metadata } export interface AggregationsParentAggregation extends AggregationsBucketAggregationBase { + /** The child type that should be selected. */ type?: RelationName } @@ -4574,9 +6033,14 @@ export interface AggregationsPercentageScoreHeuristic { } export interface AggregationsPercentileRanksAggregation extends AggregationsFormatMetricAggregationBase { + /** By default, the aggregation associates a unique string key with each bucket and returns the ranges as a hash rather than an array. + * Set to `false` to disable this behavior. */ keyed?: boolean + /** An array of values for which to calculate the percentile ranks. */ values?: double[] | null + /** Uses the alternative High Dynamic Range Histogram algorithm to calculate percentile ranks. */ hdr?: AggregationsHdrMethod + /** Sets parameters for the default TDigest algorithm used to calculate percentile ranks. */ tdigest?: AggregationsTDigest } @@ -4587,9 +6051,14 @@ export interface AggregationsPercentilesAggregateBase extends AggregationsAggreg } export interface AggregationsPercentilesAggregation extends AggregationsFormatMetricAggregationBase { + /** By default, the aggregation associates a unique string key with each bucket and returns the ranges as a hash rather than an array. + * Set to `false` to disable this behavior. */ keyed?: boolean + /** The percentiles to calculate. */ percents?: double[] + /** Uses the alternative High Dynamic Range Histogram algorithm to calculate percentiles. */ hdr?: AggregationsHdrMethod + /** Sets parameters for the default TDigest algorithm used to calculate percentiles. */ tdigest?: AggregationsTDigest } @@ -4597,17 +6066,28 @@ export interface AggregationsPercentilesBucketAggregate extends AggregationsPerc } export interface AggregationsPercentilesBucketAggregation extends AggregationsPipelineAggregationBase { + /** The list of percentiles to calculate. 
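A short sketch of the percentiles options documented above (`percents`, plus the `tdigest`/`hdr` tuning objects); the index and field names are placeholders:

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' }) // placeholder node

const res = await client.search({
  index: 'latency',                            // placeholder index
  size: 0,
  aggregations: {
    load_time_percentiles: {
      percentiles: {
        field: 'load_time',
        percents: [50, 95, 99],
        tdigest: { compression: 200 }          // higher compression: more memory, better accuracy
      }
    }
  }
})

console.log(res.aggregations)
```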
*/ percents?: double[] } export interface AggregationsPipelineAggregationBase extends AggregationsBucketPathAggregation { + /** `DecimalFormat` pattern for the output value. + * If specified, the formatted value is returned in the aggregation’s `value_as_string` property. */ format?: string + /** Policy to apply when gaps are found in the data. */ gap_policy?: AggregationsGapPolicy } export interface AggregationsRandomSamplerAggregation extends AggregationsBucketAggregationBase { + /** The probability that a document will be included in the aggregated data. + * Must be greater than 0, less than 0.5, or exactly 1. + * The lower the probability, the fewer documents are matched. */ probability: double + /** The seed to generate the random sampling of documents. + * When a seed is provided, the random subset of documents is the same between calls. */ seed?: integer + /** When combined with seed, setting shard_seed ensures 100% consistent sampling over shards where data is exactly the same. + * @remarks This property is not supported on Elastic Cloud Serverless. */ shard_seed?: integer } @@ -4615,10 +6095,15 @@ export interface AggregationsRangeAggregate extends AggregationsMultiBucketAggre } export interface AggregationsRangeAggregation extends AggregationsBucketAggregationBase { + /** The date field whose values are use to build ranges. */ field?: Field + /** The value to apply to documents that do not have a value. + * By default, documents without a value are ignored. */ missing?: integer + /** An array of ranges used to bucket documents. */ ranges?: AggregationsAggregationRange[] - script?: Script | string + script?: Script | ScriptSource + /** Set to `true` to associate a unique string key with each bucket and return the ranges as a hash rather than an array. */ keyed?: boolean format?: string } @@ -4628,17 +6113,26 @@ export interface AggregationsRangeBucketKeys extends AggregationsMultiBucketBase to?: double from_as_string?: string to_as_string?: string + /** The bucket key. Present if the aggregation is _not_ keyed */ key?: string } export type AggregationsRangeBucket = AggregationsRangeBucketKeys & { [property: string]: AggregationsAggregate | double | string | long } export interface AggregationsRareTermsAggregation extends AggregationsBucketAggregationBase { + /** Terms that should be excluded from the aggregation. */ exclude?: AggregationsTermsExclude + /** The field from which to return rare terms. */ field?: Field + /** Terms that should be included in the aggregation. */ include?: AggregationsTermsInclude + /** The maximum number of documents a term should appear in. */ max_doc_count?: long + /** The value to apply to documents that do not have a value. + * By default, documents without a value are ignored. */ missing?: AggregationsMissing + /** The precision of the internal CuckooFilters. + * Smaller precision leads to better approximation, but higher memory usage. */ precision?: double value_type?: string } @@ -4649,7 +6143,10 @@ export interface AggregationsRateAggregate extends AggregationsAggregateBase { } export interface AggregationsRateAggregation extends AggregationsFormatMetricAggregationBase { + /** The interval used to calculate the rate. + * By default, the interval of the `date_histogram` is used. */ unit?: AggregationsCalendarInterval + /** How the rate is calculated. 
*/ mode?: AggregationsRateMode } @@ -4661,6 +6158,8 @@ export type AggregationsReverseNestedAggregate = AggregationsReverseNestedAggreg & { [property: string]: AggregationsAggregate | long | Metadata } export interface AggregationsReverseNestedAggregation extends AggregationsBucketAggregationBase { + /** Defines the nested object field that should be joined back to. + * The default is empty, which means that it joins back to the root/main document level. */ path?: Field } @@ -4670,13 +6169,14 @@ export type AggregationsSamplerAggregate = AggregationsSamplerAggregateKeys & { [property: string]: AggregationsAggregate | long | Metadata } export interface AggregationsSamplerAggregation extends AggregationsBucketAggregationBase { + /** Limits how many top-scoring documents are collected in the sample processed on each shard. */ shard_size?: integer } export type AggregationsSamplerAggregationExecutionHint = 'map' | 'global_ordinals' | 'bytes_hash' export interface AggregationsScriptedHeuristic { - script: Script | string + script: Script | ScriptSource } export interface AggregationsScriptedMetricAggregate extends AggregationsAggregateBase { @@ -4684,14 +6184,26 @@ export interface AggregationsScriptedMetricAggregate extends AggregationsAggrega } export interface AggregationsScriptedMetricAggregation extends AggregationsMetricAggregationBase { - combine_script?: Script | string - init_script?: Script | string - map_script?: Script | string + /** Runs once on each shard after document collection is complete. + * Allows the aggregation to consolidate the state returned from each shard. */ + combine_script?: Script | ScriptSource + /** Runs prior to any collection of documents. + * Allows the aggregation to set up any initial state. */ + init_script?: Script | ScriptSource + /** Run once per document collected. + * If no `combine_script` is specified, the resulting state needs to be stored in the `state` object. */ + map_script?: Script | ScriptSource + /** A global object with script parameters for `init`, `map` and `combine` scripts. + * It is shared between the scripts. */ params?: Record - reduce_script?: Script | string + /** Runs once on the coordinating node after all shards have returned their results. + * The script is provided with access to a variable `states`, which is an array of the result of the `combine_script` on each shard. */ + reduce_script?: Script | ScriptSource } export interface AggregationsSerialDifferencingAggregation extends AggregationsPipelineAggregationBase { + /** The historical bucket to subtract from the current value. + * Must be a positive, non-zero integer. */ lag?: integer } @@ -4720,20 +6232,37 @@ export interface AggregationsSignificantTermsAggregateBase extends } export interface AggregationsSignificantTermsAggregation extends AggregationsBucketAggregationBase { + /** A background filter that can be used to focus in on significant terms within a narrower context, instead of the entire index. */ background_filter?: QueryDslQueryContainer + /** Use Chi square, as described in "Information Retrieval", Manning et al., Chapter 13.5.2, as the significance score. */ chi_square?: AggregationsChiSquareHeuristic + /** Terms to exclude. */ exclude?: AggregationsTermsExclude + /** Mechanism by which the aggregation should be executed: using field values directly or using global ordinals. */ execution_hint?: AggregationsTermsAggregationExecutionHint + /** The field from which to return significant terms. 
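The four script phases documented above (`init_script`, `map_script`, `combine_script`, `reduce_script`) fit together as in this sketch, adapted from the standard profit example in the Elasticsearch docs; the index and field names are placeholders, and each script may be a plain source string now that `ScriptSource` is accepted:

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' }) // placeholder node

const res = await client.search({
  index: 'transactions',                       // placeholder index
  size: 0,
  aggregations: {
    profit: {
      scripted_metric: {
        // runs once per shard, before collection
        init_script: 'state.amounts = []',
        // runs once per collected document
        map_script: "state.amounts.add(doc.type.value == 'sale' ? doc.amount.value : -1 * doc.amount.value)",
        // runs once per shard, after collection
        combine_script: 'double p = 0; for (a in state.amounts) { p += a } return p',
        // runs once on the coordinating node over all shard results
        reduce_script: 'double p = 0; for (a in states) { p += a } return p'
      }
    }
  }
})

console.log(res.aggregations)
```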
*/ field?: Field + /** Use Google normalized distance as described in "The Google Similarity Distance", Cilibrasi and Vitanyi, 2007, as the significance score. */ gnd?: AggregationsGoogleNormalizedDistanceHeuristic + /** Terms to include. */ include?: AggregationsTermsInclude + /** Use JLH score as the significance score. */ jlh?: EmptyObject + /** Only return terms that are found in more than `min_doc_count` hits. */ min_doc_count?: long + /** Use mutual information as described in "Information Retrieval", Manning et al., Chapter 13.5.1, as the significance score. */ mutual_information?: AggregationsMutualInformationHeuristic + /** A simple calculation of the number of documents in the foreground sample with a term divided by the number of documents in the background with the term. */ percentage?: AggregationsPercentageScoreHeuristic + /** Customized score, implemented via a script. */ script_heuristic?: AggregationsScriptedHeuristic + /** Regulates the certainty a shard has if the term should actually be added to the candidate list or not with respect to the `min_doc_count`. + * Terms will only be considered if their local shard frequency within the set is higher than the `shard_min_doc_count`. */ shard_min_doc_count?: long + /** Can be used to control the volumes of candidate terms produced by each shard. + * By default, `shard_size` will be automatically estimated based on the number of shards and the `size` parameter. */ shard_size?: integer + /** The number of buckets returned out of the overall terms list. */ size?: integer } @@ -4743,22 +6272,41 @@ export interface AggregationsSignificantTermsBucketBase extends AggregationsMult } export interface AggregationsSignificantTextAggregation extends AggregationsBucketAggregationBase { + /** A background filter that can be used to focus in on significant terms within a narrower context, instead of the entire index. */ background_filter?: QueryDslQueryContainer + /** Use Chi square, as described in "Information Retrieval", Manning et al., Chapter 13.5.2, as the significance score. */ chi_square?: AggregationsChiSquareHeuristic + /** Values to exclude. */ exclude?: AggregationsTermsExclude + /** Determines whether the aggregation will use field values directly or global ordinals. */ execution_hint?: AggregationsTermsAggregationExecutionHint + /** The field from which to return significant text. */ field?: Field + /** Whether to filter out duplicate text to deal with noisy data. */ filter_duplicate_text?: boolean + /** Use Google normalized distance as described in "The Google Similarity Distance", Cilibrasi and Vitanyi, 2007, as the significance score. */ gnd?: AggregationsGoogleNormalizedDistanceHeuristic + /** Values to include. */ include?: AggregationsTermsInclude + /** Use JLH score as the significance score. */ jlh?: EmptyObject + /** Only return values that are found in more than `min_doc_count` hits. */ min_doc_count?: long + /** Use mutual information as described in "Information Retrieval", Manning et al., Chapter 13.5.1, as the significance score. */ mutual_information?: AggregationsMutualInformationHeuristic + /** A simple calculation of the number of documents in the foreground sample with a term divided by the number of documents in the background with the term. */ percentage?: AggregationsPercentageScoreHeuristic + /** Customized score, implemented via a script.
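A sketch of the `significant_text` options being documented here, run against the documents matched by a query; the index and field names are placeholders:

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' }) // placeholder node

await client.search({
  index: 'news',                               // placeholder index
  size: 0,
  query: { match: { content: 'elasticsearch' } },
  aggregations: {
    keywords: {
      significant_text: {
        field: 'content',
        filter_duplicate_text: true,           // drop near-duplicate text before scoring
        size: 10
      }
    }
  }
})
```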
*/ script_heuristic?: AggregationsScriptedHeuristic + /** Regulates the certainty a shard has if the values should actually be added to the candidate list or not with respect to the min_doc_count. + * Values will only be considered if their local shard frequency within the set is higher than the `shard_min_doc_count`. */ shard_min_doc_count?: long + /** The number of candidate terms produced by each shard. + * By default, `shard_size` will be automatically estimated based on the number of shards and the `size` parameter. */ shard_size?: integer + /** The number of buckets returned out of the overall terms list. */ size?: integer + /** Overrides the JSON `_source` fields from which text will be analyzed. */ source_fields?: Fields } @@ -4775,6 +6323,8 @@ export interface AggregationsSingleBucketAggregateBase extends AggregationsAggre } export interface AggregationsSingleMetricAggregateBase extends AggregationsAggregateBase { + /** The metric value. A missing value generally means that there was no data to aggregate, + * unless specified otherwise. */ value: double | null value_as_string?: string } @@ -4840,6 +6390,7 @@ export interface AggregationsStringStatsAggregate extends AggregationsAggregateB } export interface AggregationsStringStatsAggregation extends AggregationsMetricAggregationBase { + /** Shows the probability distribution for all characters. */ show_distribution?: boolean } @@ -4862,6 +6413,7 @@ export interface AggregationsSumBucketAggregation extends AggregationsPipelineAg } export interface AggregationsTDigest { + /** Limits the maximum number of nodes used by the underlying TDigest algorithm to `20 * compression`, enabling control of memory usage and approximation error. */ compression?: integer } @@ -4877,8 +6429,11 @@ export interface AggregationsTTestAggregate extends AggregationsAggregateBase { } export interface AggregationsTTestAggregation { + /** Test population A. */ a?: AggregationsTestPopulation + /** Test population B. */ b?: AggregationsTestPopulation + /** The type of test. */ type?: AggregationsTTestType } @@ -4890,21 +6445,40 @@ export interface AggregationsTermsAggregateBase extends Aggre } export interface AggregationsTermsAggregation extends AggregationsBucketAggregationBase { + /** Determines how child aggregations should be calculated: breadth-first or depth-first. */ collect_mode?: AggregationsTermsAggregationCollectMode + /** Values to exclude. + * Accepts regular expressions and partitions. */ exclude?: AggregationsTermsExclude + /** Determines whether the aggregation will use field values directly or global ordinals. */ execution_hint?: AggregationsTermsAggregationExecutionHint + /** The field from which to return terms. */ field?: Field + /** Values to include. + * Accepts regular expressions and partitions. */ include?: AggregationsTermsInclude + /** Only return values that are found in more than `min_doc_count` hits. */ min_doc_count?: integer + /** The value to apply to documents that do not have a value. + * By default, documents without a value are ignored. */ missing?: AggregationsMissing missing_order?: AggregationsMissingOrder missing_bucket?: boolean + /** Coerced unmapped fields into the specified type. */ value_type?: string + /** Specifies the sort order of the buckets. + * Defaults to sorting by descending document count. 
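A sketch of a request using several of the `terms` aggregation options being documented in this hunk (`size`, `shard_size`, `min_doc_count`, `order`, `show_term_doc_count_error`); the index and field names are placeholders:

```ts
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' }) // placeholder node

await client.search({
  index: 'products',                           // placeholder index
  size: 0,
  aggregations: {
    top_tags: {
      terms: {
        field: 'tags.keyword',
        size: 25,                              // buckets returned
        shard_size: 100,                       // candidate terms per shard
        min_doc_count: 5,
        order: { _count: 'desc' },
        show_term_doc_count_error: true        // report per-term doc count error bounds
      }
    }
  }
})
```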
*/ order?: AggregationsAggregateOrder - script?: Script | string + script?: Script | ScriptSource + /** Regulates the certainty a shard has if the term should actually be added to the candidate list or not with respect to the `min_doc_count`. + * Terms will only be considered if their local shard frequency within the set is higher than the `shard_min_doc_count`. */ shard_min_doc_count?: long + /** The number of candidate terms produced by each shard. + * By default, `shard_size` will be automatically estimated based on the number of shards and the `size` parameter. */ shard_size?: integer + /** Set to `true` to return the `doc_count_error_upper_bound`, which is an upper bound to the error on the `doc_count` returned by each shard. */ show_term_doc_count_error?: boolean + /** The number of buckets returned out of the overall terms list. */ size?: integer format?: string } @@ -4922,13 +6496,17 @@ export type AggregationsTermsExclude = string | string[] export type AggregationsTermsInclude = string | string[] | AggregationsTermsPartition export interface AggregationsTermsPartition { + /** The number of partitions. */ num_partitions: long + /** The partition number for this request. */ partition: long } export interface AggregationsTestPopulation { + /** The field to aggregate. */ field: Field - script?: Script | string + script?: Script | ScriptSource + /** A filter used to define a set of records to run unpaired t-test on. */ filter?: QueryDslQueryContainer } @@ -4936,7 +6514,9 @@ export interface AggregationsTimeSeriesAggregate extends AggregationsMultiBucket } export interface AggregationsTimeSeriesAggregation extends AggregationsBucketAggregationBase { + /** The maximum number of results to return. */ size?: integer + /** Set to `true` to associate a unique string key with each bucket and returns the ranges as a hash rather than an array. */ keyed?: boolean } @@ -4951,18 +6531,33 @@ export interface AggregationsTopHitsAggregate extends AggregationsAggregateBase } export interface AggregationsTopHitsAggregation extends AggregationsMetricAggregationBase { + /** Fields for which to return doc values. */ docvalue_fields?: (QueryDslFieldAndFormat | Field)[] + /** If `true`, returns detailed information about score computation as part of a hit. */ explain?: boolean + /** Array of wildcard (*) patterns. The request returns values for field names + * matching these patterns in the hits.fields property of the response. */ fields?: (QueryDslFieldAndFormat | Field)[] + /** Starting document offset. */ from?: integer + /** Specifies the highlighter to use for retrieving highlighted snippets from one or more fields in the search results. */ highlight?: SearchHighlight + /** Returns the result of one or more script evaluations for each hit. */ script_fields?: Record + /** The maximum number of top matching hits to return per bucket. */ size?: integer + /** Sort order of the top matching hits. + * By default, the hits are sorted by the score of the main query. */ sort?: Sort + /** Selects the fields of the source that are returned. */ _source?: SearchSourceConfig + /** Returns values for the specified stored fields (fields that use the `store` mapping option). */ stored_fields?: Fields + /** If `true`, calculates and returns document scores, even if the scores are not used for sorting. */ track_scores?: boolean + /** If `true`, returns document version as part of a hit. */ version?: boolean + /** If `true`, returns sequence number and primary term of the last modification of each hit. 
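
The terms-aggregation and top_hits options above map directly onto the generated interfaces; a minimal sketch, assuming the types are published under `@elastic/elasticsearch/lib/api/types` and using hypothetical field names:

import type {
  AggregationsTermsAggregation,
  AggregationsTopHitsAggregation
} from '@elastic/elasticsearch/lib/api/types'

// A terms aggregation tuned with the per-shard knobs described above.
export const byTag: AggregationsTermsAggregation = {
  field: 'tags', // hypothetical keyword field
  size: 10, // buckets returned from the overall terms list
  shard_size: 50, // candidate terms requested from each shard
  min_doc_count: 2,
  show_term_doc_count_error: true,
  order: { _count: 'desc' } // default ordering, shown explicitly
}

// A top_hits sub-aggregation returning the newest document per bucket.
export const latestPerTag: AggregationsTopHitsAggregation = {
  size: 1,
  sort: [{ published_at: 'desc' }],
  _source: ['title', 'published_at']
}
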
*/ seq_no_primary_term?: boolean } @@ -4976,12 +6571,16 @@ export interface AggregationsTopMetricsAggregate extends AggregationsAggregateBa } export interface AggregationsTopMetricsAggregation extends AggregationsMetricAggregationBase { + /** The fields of the top document to return. */ metrics?: AggregationsTopMetricsValue | AggregationsTopMetricsValue[] + /** The number of top documents from which to return metrics. */ size?: integer + /** The sort order of the documents. */ sort?: Sort } export interface AggregationsTopMetricsValue { + /** A field to return as a metric. */ field: Field } @@ -5011,11 +6610,17 @@ export interface AggregationsVariableWidthHistogramAggregate extends Aggregation } export interface AggregationsVariableWidthHistogramAggregation { + /** The name of the field. */ field?: Field + /** The target number of buckets. */ buckets?: integer + /** The number of buckets that the coordinating node will request from each shard. + * Defaults to `buckets * 50`. */ shard_size?: integer + /** Specifies the number of individual documents that will be stored in memory on a shard before the initial bucketing algorithm is run. + * Defaults to `min(10 * shard_size, 50000)`. */ initial_buffer?: integer - script?: Script | string + script?: Script | ScriptSource } export interface AggregationsVariableWidthHistogramBucketKeys extends AggregationsMultiBucketBase { @@ -5030,22 +6635,27 @@ export type AggregationsVariableWidthHistogramBucket = AggregationsVariableWidth & { [property: string]: AggregationsAggregate | double | string | long } export interface AggregationsWeightedAverageAggregation { + /** A numeric response formatter. */ format?: string + /** Configuration for the field that provides the values. */ value?: AggregationsWeightedAverageValue value_type?: AggregationsValueType + /** Configuration for the field or script that provides the weights. */ weight?: AggregationsWeightedAverageValue } export interface AggregationsWeightedAverageValue { + /** The field from which to extract the values or weights. */ field?: Field + /** A value or weight to use if the field is missing. 
*/ missing?: double - script?: Script | string + script?: Script | ScriptSource } export interface AggregationsWeightedAvgAggregate extends AggregationsSingleMetricAggregateBase { } -export type AnalysisAnalyzer = AnalysisCustomAnalyzer | AnalysisFingerprintAnalyzer | AnalysisKeywordAnalyzer | AnalysisLanguageAnalyzer | AnalysisNoriAnalyzer | AnalysisPatternAnalyzer | AnalysisSimpleAnalyzer | AnalysisStandardAnalyzer | AnalysisStopAnalyzer | AnalysisWhitespaceAnalyzer | AnalysisIcuAnalyzer | AnalysisKuromojiAnalyzer | AnalysisSnowballAnalyzer | AnalysisArabicAnalyzer | AnalysisArmenianAnalyzer | AnalysisBasqueAnalyzer | AnalysisBengaliAnalyzer | AnalysisBrazilianAnalyzer | AnalysisBulgarianAnalyzer | AnalysisCatalanAnalyzer | AnalysisChineseAnalyzer | AnalysisCjkAnalyzer | AnalysisCzechAnalyzer | AnalysisDanishAnalyzer | AnalysisDutchAnalyzer | AnalysisEnglishAnalyzer | AnalysisEstonianAnalyzer | AnalysisFinnishAnalyzer | AnalysisFrenchAnalyzer | AnalysisGalicianAnalyzer | AnalysisGermanAnalyzer | AnalysisGreekAnalyzer | AnalysisHindiAnalyzer | AnalysisHungarianAnalyzer | AnalysisIndonesianAnalyzer | AnalysisIrishAnalyzer | AnalysisItalianAnalyzer | AnalysisLatvianAnalyzer | AnalysisLithuanianAnalyzer | AnalysisNorwegianAnalyzer | AnalysisPersianAnalyzer | AnalysisPortugueseAnalyzer | AnalysisRomanianAnalyzer | AnalysisRussianAnalyzer | AnalysisSerbianAnalyzer | AnalysisSoraniAnalyzer | AnalysisSpanishAnalyzer | AnalysisSwedishAnalyzer | AnalysisTurkishAnalyzer | AnalysisThaiAnalyzer +export type AnalysisAnalyzer = AnalysisCustomAnalyzer | AnalysisFingerprintAnalyzer | AnalysisKeywordAnalyzer | AnalysisNoriAnalyzer | AnalysisPatternAnalyzer | AnalysisSimpleAnalyzer | AnalysisStandardAnalyzer | AnalysisStopAnalyzer | AnalysisWhitespaceAnalyzer | AnalysisIcuAnalyzer | AnalysisKuromojiAnalyzer | AnalysisSnowballAnalyzer | AnalysisArabicAnalyzer | AnalysisArmenianAnalyzer | AnalysisBasqueAnalyzer | AnalysisBengaliAnalyzer | AnalysisBrazilianAnalyzer | AnalysisBulgarianAnalyzer | AnalysisCatalanAnalyzer | AnalysisChineseAnalyzer | AnalysisCjkAnalyzer | AnalysisCzechAnalyzer | AnalysisDanishAnalyzer | AnalysisDutchAnalyzer | AnalysisEnglishAnalyzer | AnalysisEstonianAnalyzer | AnalysisFinnishAnalyzer | AnalysisFrenchAnalyzer | AnalysisGalicianAnalyzer | AnalysisGermanAnalyzer | AnalysisGreekAnalyzer | AnalysisHindiAnalyzer | AnalysisHungarianAnalyzer | AnalysisIndonesianAnalyzer | AnalysisIrishAnalyzer | AnalysisItalianAnalyzer | AnalysisLatvianAnalyzer | AnalysisLithuanianAnalyzer | AnalysisNorwegianAnalyzer | AnalysisPersianAnalyzer | AnalysisPortugueseAnalyzer | AnalysisRomanianAnalyzer | AnalysisRussianAnalyzer | AnalysisSerbianAnalyzer | AnalysisSoraniAnalyzer | AnalysisSpanishAnalyzer | AnalysisSwedishAnalyzer | AnalysisTurkishAnalyzer | AnalysisThaiAnalyzer export interface AnalysisArabicAnalyzer { type: 'arabic' @@ -5152,7 +6762,7 @@ export interface AnalysisCompoundWordTokenFilterBase extends AnalysisTokenFilter export interface AnalysisConditionTokenFilter extends AnalysisTokenFilterBase { type: 'condition' filter: string[] - script: Script | string + script: Script | ScriptSource } export interface AnalysisCustomAnalyzer { @@ -5243,9 +6853,16 @@ export interface AnalysisEstonianAnalyzer { export interface AnalysisFingerprintAnalyzer { type: 'fingerprint' version?: VersionString + /** The maximum token size to emit. Tokens larger than this size will be discarded. + * Defaults to `255` */ max_output_size?: integer + /** The character to use to concatenate the terms. 
+ * Defaults to a space. */ separator?: string + /** A pre-defined stop words list like `_english_` or an array containing a list of stop words. + * Defaults to `_none_`. */ stopwords?: AnalysisStopWords + /** The path to a file containing stop words. */ stopwords_path?: string } @@ -5481,17 +7098,6 @@ export interface AnalysisKuromojiTokenizer extends AnalysisTokenizerBase { discard_compound_token?: boolean } -export type AnalysisLanguage = 'Arabic' | 'Armenian' | 'Basque' | 'Brazilian' | 'Bulgarian' | 'Catalan' | 'Chinese' | 'Cjk' | 'Czech' | 'Danish' | 'Dutch' | 'English' | 'Estonian' | 'Finnish' | 'French' | 'Galician' | 'German' | 'Greek' | 'Hindi' | 'Hungarian' | 'Indonesian' | 'Irish' | 'Italian' | 'Latvian' | 'Norwegian' | 'Persian' | 'Portuguese' | 'Romanian' | 'Russian' | 'Sorani' | 'Spanish' | 'Swedish' | 'Turkish' | 'Thai' - -export interface AnalysisLanguageAnalyzer { - type: 'language' - version?: VersionString - language: AnalysisLanguage - stem_exclusion: string[] - stopwords?: AnalysisStopWords - stopwords_path?: string -} - export interface AnalysisLatvianAnalyzer { type: 'latvian' stopwords?: AnalysisStopWords @@ -5606,10 +7212,18 @@ export interface AnalysisPathHierarchyTokenizer extends AnalysisTokenizerBase { export interface AnalysisPatternAnalyzer { type: 'pattern' version?: VersionString + /** Java regular expression flags. Flags should be pipe-separated, eg "CASE_INSENSITIVE|COMMENTS". */ flags?: string + /** Should terms be lowercased or not. + * Defaults to `true`. */ lowercase?: boolean + /** A Java regular expression. + * Defaults to `\W+`. */ pattern?: string + /** A pre-defined stop words list like `_english_` or an array containing a list of stop words. + * Defaults to `_none_`. */ stopwords?: AnalysisStopWords + /** The path to a file containing stop words. */ stopwords_path?: string } @@ -5678,7 +7292,7 @@ export interface AnalysisPortugueseAnalyzer { export interface AnalysisPredicateTokenFilter extends AnalysisTokenFilterBase { type: 'predicate_token_filter' - script: Script | string + script: Script | ScriptSource } export interface AnalysisRemoveDuplicatesTokenFilter extends AnalysisTokenFilterBase { @@ -5765,8 +7379,13 @@ export interface AnalysisSpanishAnalyzer { export interface AnalysisStandardAnalyzer { type: 'standard' + /** The maximum token length. If a token is seen that exceeds this length then it is split at `max_token_length` intervals. + * Defaults to `255`. */ max_token_length?: integer + /** A pre-defined stop words list like `_english_` or an array containing a list of stop words. + * Defaults to `_none_`. */ stopwords?: AnalysisStopWords + /** The path to a file containing stop words. */ stopwords_path?: string } @@ -5784,13 +7403,17 @@ export interface AnalysisStemmerOverrideTokenFilter extends AnalysisTokenFilterB export interface AnalysisStemmerTokenFilter extends AnalysisTokenFilterBase { type: 'stemmer' language?: string + /** @alias language */ name?: string } export interface AnalysisStopAnalyzer { type: 'stop' version?: VersionString + /** A pre-defined stop words list like `_english_` or an array containing a list of stop words. + * Defaults to `_none_`. */ stopwords?: AnalysisStopWords + /** The path to a file containing stop words. 
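
A sketch of how the pattern-analyzer options above are wired into index settings at index-creation time; the index name, analyzer name, and pattern are illustrative assumptions:

import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

// Usage sketch: a pattern analyzer that splits on commas, lowercases,
// and drops English stop words.
async function createArticlesIndex () {
  await client.indices.create({
    index: 'articles', // hypothetical index
    settings: {
      analysis: {
        analyzer: {
          comma_pattern: {
            type: 'pattern',
            pattern: ',\\s*',
            lowercase: true,
            stopwords: '_english_'
          }
        }
      }
    },
    mappings: {
      properties: {
        tags: { type: 'text', analyzer: 'comma_pattern' }
      }
    }
  })
}

createArticlesIndex().catch(console.error)
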
*/ stopwords_path?: string } @@ -5970,6 +7593,12 @@ export interface MappingBooleanProperty extends MappingDocValuesPropertyBase { fielddata?: IndicesNumericFielddata index?: boolean null_value?: boolean + ignore_malformed?: boolean + script?: Script | ScriptSource + on_script_error?: MappingOnScriptError + /** For internal use by Elastic only. Marks the field as a time series dimension. Defaults to false. + * @experimental */ + time_series_dimension?: boolean type: 'boolean' } @@ -6016,6 +7645,8 @@ export interface MappingDateNanosProperty extends MappingDocValuesPropertyBase { format?: string ignore_malformed?: boolean index?: boolean + script?: Script | ScriptSource + on_script_error?: MappingOnScriptError null_value?: DateTime precision_step?: integer type: 'date_nanos' @@ -6027,6 +7658,8 @@ export interface MappingDateProperty extends MappingDocValuesPropertyBase { format?: string ignore_malformed?: boolean index?: boolean + script?: Script | ScriptSource + on_script_error?: MappingOnScriptError null_value?: DateTime precision_step?: integer locale?: string @@ -6041,20 +7674,58 @@ export interface MappingDateRangeProperty extends MappingRangePropertyBase { export type MappingDenseVectorElementType = 'bit' | 'byte' | 'float' export interface MappingDenseVectorIndexOptions { + /** The confidence interval to use when quantizing the vectors. Can be any value between and including `0.90` and + * `1.0` or exactly `0`. When the value is `0`, this indicates that dynamic quantiles should be calculated for + * optimized quantization. When between `0.90` and `1.0`, this value restricts the values used when calculating + * the quantization thresholds. + * + * For example, a value of `0.95` will only use the middle `95%` of the values when calculating the quantization + * thresholds (e.g. the highest and lowest `2.5%` of values will be ignored). + * + * Defaults to `1/(dims + 1)` for `int8` quantized vectors and `0` for `int4` for dynamic quantile calculation. + * + * Only applicable to `int8_hnsw`, `int4_hnsw`, `int8_flat`, and `int4_flat` index types. */ confidence_interval?: float + /** The number of candidates to track while assembling the list of nearest neighbors for each new node. + * + * Only applicable to `hnsw`, `int8_hnsw`, `bbq_hnsw`, and `int4_hnsw` index types. */ ef_construction?: integer + /** The number of neighbors each node will be connected to in the HNSW graph. + * + * Only applicable to `hnsw`, `int8_hnsw`, `bbq_hnsw`, and `int4_hnsw` index types. */ m?: integer + /** The type of kNN algorithm to use. */ type: MappingDenseVectorIndexOptionsType } -export type MappingDenseVectorIndexOptionsType = 'flat' | 'hnsw' | 'int4_flat' | 'int4_hnsw' | 'int8_flat' | 'int8_hnsw' +export type MappingDenseVectorIndexOptionsType = 'bbq_flat' | 'bbq_hnsw' | 'flat' | 'hnsw' | 'int4_flat' | 'int4_hnsw' | 'int8_flat' | 'int8_hnsw' export interface MappingDenseVectorProperty extends MappingPropertyBase { type: 'dense_vector' + /** Number of vector dimensions. Can't exceed `4096`. If `dims` is not specified, it will be set to the length of + * the first vector added to the field. */ dims?: integer + /** The data type used to encode vectors. The supported data types are `float` (default), `byte`, and `bit`. */ element_type?: MappingDenseVectorElementType + /** If `true`, you can search this field using the kNN search API. */ index?: boolean + /** An optional section that configures the kNN indexing algorithm. 
The HNSW algorithm has two internal parameters + * that influence how the data structure is built. These can be adjusted to improve the accuracy of results, at the + * expense of slower indexing speed. + * + * This parameter can only be specified when `index` is `true`. */ index_options?: MappingDenseVectorIndexOptions + /** The vector similarity metric to use in kNN search. + * + * Documents are ranked by their vector field's similarity to the query vector. The `_score` of each document will + * be derived from the similarity, in a way that ensures scores are positive and that a larger score corresponds + * to a higher ranking. + * + * Defaults to `l2_norm` when `element_type` is `bit` otherwise defaults to `cosine`. + * + * `bit` vectors only support `l2_norm` as their similarity metric. + * + * This parameter can only be specified when `index` is `true`. */ similarity?: MappingDenseVectorSimilarity } @@ -6081,7 +7752,7 @@ export interface MappingDynamicProperty extends MappingDocValuesPropertyBase { null_value?: FieldValue boost?: double coerce?: boolean - script?: Script | string + script?: Script | ScriptSource on_script_error?: MappingOnScriptError ignore_malformed?: boolean time_series_metric?: MappingTimeSeriesMetricType @@ -6103,7 +7774,7 @@ export interface MappingDynamicProperty extends MappingDocValuesPropertyBase { export interface MappingDynamicTemplate { mapping?: MappingProperty - runtime?: MappingProperty + runtime?: MappingRuntimeField match?: string | string[] path_match?: string | string[] unmatch?: string | string[] @@ -6159,7 +7830,7 @@ export interface MappingGeoPointProperty extends MappingDocValuesPropertyBase { null_value?: GeoLocation index?: boolean on_script_error?: MappingOnScriptError - script?: Script | string + script?: Script | ScriptSource type: 'geo_point' } @@ -6167,6 +7838,7 @@ export interface MappingGeoShapeProperty extends MappingDocValuesPropertyBase { coerce?: boolean ignore_malformed?: boolean ignore_z_value?: boolean + index?: boolean orientation?: MappingGeoOrientation strategy?: MappingGeoStrategy type: 'geo_shape' @@ -6188,7 +7860,9 @@ export interface MappingIcuCollationProperty extends MappingDocValuesPropertyBas type: 'icu_collation_keyword' norms?: boolean index_options?: MappingIndexOptions + /** Should the field be searchable? */ index?: boolean + /** Accepts a string value which is substituted for any explicit null values. Defaults to null, which means the field is treated as missing. */ null_value?: string rules?: string language?: string @@ -6225,7 +7899,9 @@ export interface MappingIpProperty extends MappingDocValuesPropertyBase { ignore_malformed?: boolean null_value?: string on_script_error?: MappingOnScriptError - script?: Script | string + script?: Script | ScriptSource + /** For internal use by Elastic only. Marks the field as a time series dimension. Defaults to false. + * @experimental */ time_series_dimension?: boolean type: 'ip' } @@ -6245,13 +7921,15 @@ export interface MappingKeywordProperty extends MappingDocValuesPropertyBase { eager_global_ordinals?: boolean index?: boolean index_options?: MappingIndexOptions - script?: Script | string + script?: Script | ScriptSource on_script_error?: MappingOnScriptError normalizer?: string norms?: boolean null_value?: string similarity?: string | null split_queries_on_whitespace?: boolean + /** For internal use by Elastic only. Marks the field as a time series dimension. Defaults to false. 
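
To illustrate the dense_vector options above, a sketch that maps a small vector field with `int8_hnsw` index options and runs an approximate kNN search; the index name, dimension count, and example vector are assumptions:

import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

async function knnSketch () {
  // Usage sketch: quantized HNSW index over a 4-dimensional vector field.
  await client.indices.create({
    index: 'image-embeddings', // hypothetical index
    mappings: {
      properties: {
        embedding: {
          type: 'dense_vector',
          dims: 4,
          index: true,
          similarity: 'cosine',
          index_options: { type: 'int8_hnsw', m: 16, ef_construction: 100 }
        }
      }
    }
  })

  // Approximate kNN search against the field defined above.
  const response = await client.search({
    index: 'image-embeddings',
    knn: {
      field: 'embedding',
      query_vector: [0.12, -0.3, 0.84, 0.05],
      k: 5,
      num_candidates: 50
    }
  })
  console.log(response.hits.hits)
}

knnSketch().catch(console.error)
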
+ * @experimental */ time_series_dimension?: boolean type: 'keyword' } @@ -6267,8 +7945,13 @@ export interface MappingLongRangeProperty extends MappingRangePropertyBase { export interface MappingMatchOnlyTextProperty { type: 'match_only_text' + /** Multi-fields allow the same string value to be indexed in multiple ways for different purposes, such as one + * field for search and a multi-field for sorting and aggregations, or the same string value analyzed by different analyzers. */ fields?: Record + /** Metadata about the field. */ meta?: Record + /** Allows you to copy the values of multiple fields into a group + * field, which can then be queried as a single field. */ copy_to?: Fields } @@ -6291,14 +7974,18 @@ export interface MappingNumberPropertyBase extends MappingDocValuesPropertyBase ignore_malformed?: boolean index?: boolean on_script_error?: MappingOnScriptError - script?: Script | string + script?: Script | ScriptSource + /** For internal use by Elastic only. Marks the field as a time series dimension. Defaults to false. + * @experimental */ time_series_metric?: MappingTimeSeriesMetricType + /** For internal use by Elastic only. Marks the field as a time series dimension. Defaults to false. + * @experimental */ time_series_dimension?: boolean } export interface MappingObjectProperty extends MappingCorePropertyBase { enabled?: boolean - subobjects?: boolean + subobjects?: MappingSubobjects type?: 'object' } @@ -6325,6 +8012,7 @@ export interface MappingPointProperty extends MappingDocValuesPropertyBase { export type MappingProperty = MappingBinaryProperty | MappingBooleanProperty | MappingDynamicProperty | MappingJoinProperty | MappingKeywordProperty | MappingMatchOnlyTextProperty | MappingPercolatorProperty | MappingRankFeatureProperty | MappingRankFeaturesProperty | MappingSearchAsYouTypeProperty | MappingTextProperty | MappingVersionProperty | MappingWildcardProperty | MappingDateNanosProperty | MappingDateProperty | MappingAggregateMetricDoubleProperty | MappingDenseVectorProperty | MappingFlattenedProperty | MappingNestedProperty | MappingObjectProperty | MappingPassthroughObjectProperty | MappingSemanticTextProperty | MappingSparseVectorProperty | MappingCompletionProperty | MappingConstantKeywordProperty | MappingCountedKeywordProperty | MappingFieldAliasProperty | MappingHistogramProperty | MappingIpProperty | MappingMurmur3HashProperty | MappingTokenCountProperty | MappingGeoPointProperty | MappingGeoShapeProperty | MappingPointProperty | MappingShapeProperty | MappingByteNumberProperty | MappingDoubleNumberProperty | MappingFloatNumberProperty | MappingHalfFloatNumberProperty | MappingIntegerNumberProperty | MappingLongNumberProperty | MappingScaledFloatNumberProperty | MappingShortNumberProperty | MappingUnsignedLongNumberProperty | MappingDateRangeProperty | MappingDoubleRangeProperty | MappingFloatRangeProperty | MappingIntegerRangeProperty | MappingIpRangeProperty | MappingLongRangeProperty | MappingIcuCollationProperty export interface MappingPropertyBase { + /** Metadata about the field. */ meta?: Record properties?: Record ignore_above?: integer @@ -6354,13 +8042,21 @@ export interface MappingRoutingField { } export interface MappingRuntimeField { + /** For type `composite` */ fields?: Record + /** For type `lookup` */ fetch_fields?: (MappingRuntimeFieldFetchFields | Field)[] + /** A custom format for `date` type runtime fields. 
*/ format?: string + /** For type `lookup` */ input_field?: Field + /** For type `lookup` */ target_field?: Field + /** For type `lookup` */ target_index?: IndexName - script?: Script | string + /** Painless script executed at query time. */ + script?: Script | ScriptSource + /** Field type, which can be: `boolean`, `composite`, `date`, `double`, `geo_point`, `ip`,`keyword`, `long`, or `lookup`. */ type: MappingRuntimeFieldType } @@ -6369,7 +8065,7 @@ export interface MappingRuntimeFieldFetchFields { format?: string } -export type MappingRuntimeFieldType = 'boolean' | 'composite' | 'date' | 'double' | 'geo_point' | 'ip' | 'keyword' | 'long' | 'lookup' +export type MappingRuntimeFieldType = 'boolean' | 'composite' | 'date' | 'double' | 'geo_point' | 'geo_shape' | 'ip' | 'keyword' | 'long' | 'lookup' export type MappingRuntimeFields = Record @@ -6395,7 +8091,14 @@ export interface MappingSearchAsYouTypeProperty extends MappingCorePropertyBase export interface MappingSemanticTextProperty { type: 'semantic_text' meta?: Record - inference_id: Id + /** Inference endpoint that will be used to generate embeddings for the field. + * This parameter cannot be updated. Use the Create inference API to create the endpoint. + * If `search_inference_id` is specified, the inference endpoint will only be used at index time. */ + inference_id?: Id + /** Inference endpoint that will be used to generate embeddings at query time. + * You can update this parameter by using the Update mapping API. Use the Create inference API to create the endpoint. + * If not specified, the inference endpoint defined by inference_id will be used at both index and query time. */ + search_inference_id?: Id } export interface MappingShapeProperty extends MappingDocValuesPropertyBase { @@ -6430,6 +8133,8 @@ export interface MappingSparseVectorProperty extends MappingPropertyBase { type: 'sparse_vector' } +export type MappingSubobjects = boolean | 'true' | 'false' | 'auto' + export interface MappingSuggestContext { name: Name path?: Field @@ -6481,7 +8186,7 @@ export interface MappingTypeMapping { date_detection?: boolean dynamic?: MappingDynamicMapping dynamic_date_formats?: string[] - dynamic_templates?: Record[] + dynamic_templates?: Partial>[] _field_names?: MappingFieldNamesField index_field?: MappingIndexField _meta?: Metadata @@ -6492,7 +8197,7 @@ export interface MappingTypeMapping { _source?: MappingSourceField runtime?: Record enabled?: boolean - subobjects?: boolean + subobjects?: MappingSubobjects _data_stream_timestamp?: MappingDataStreamTimestamp } @@ -6511,16 +8216,26 @@ export interface MappingWildcardProperty extends MappingDocValuesPropertyBase { } export interface QueryDslBoolQuery extends QueryDslQueryBase { + /** The clause (query) must appear in matching documents. + * However, unlike `must`, the score of the query will be ignored. */ filter?: QueryDslQueryContainer | QueryDslQueryContainer[] + /** Specifies the number or percentage of `should` clauses returned documents must match. */ minimum_should_match?: MinimumShouldMatch + /** The clause (query) must appear in matching documents and will contribute to the score. */ must?: QueryDslQueryContainer | QueryDslQueryContainer[] + /** The clause (query) must not appear in the matching documents. + * Because scoring is ignored, a score of `0` is returned for all documents. */ must_not?: QueryDslQueryContainer | QueryDslQueryContainer[] + /** The clause (query) should appear in the matching document. 
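
A sketch of a runtime field built from the `MappingRuntimeField` shape above and supplied per request through `runtime_mappings`; the field name and Painless script are illustrative assumptions:

import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

// Usage sketch: derive a Fahrenheit value at query time from a stored Celsius field.
async function runtimeFieldSketch () {
  const response = await client.search({
    index: 'sensors', // hypothetical index
    runtime_mappings: {
      temp_f: {
        type: 'double',
        script: { source: "emit(doc['temp_c'].value * 9.0 / 5.0 + 32.0)" }
      }
    },
    fields: ['temp_f'],
    size: 3
  })
  console.log(response.hits.hits)
}

runtimeFieldSketch().catch(console.error)
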
*/ should?: QueryDslQueryContainer | QueryDslQueryContainer[] } export interface QueryDslBoostingQuery extends QueryDslQueryBase { + /** Floating point number between 0 and 1.0 used to decrease the relevance scores of documents matching the `negative` query. */ negative_boost: double + /** Query used to decrease the relevance score of matching documents. */ negative: QueryDslQueryContainer + /** Any returned documents must match this query. */ positive: QueryDslQueryContainer } @@ -6529,11 +8244,18 @@ export type QueryDslChildScoreMode = 'none' | 'avg' | 'sum' | 'max' | 'min' export type QueryDslCombinedFieldsOperator = 'or' | 'and' export interface QueryDslCombinedFieldsQuery extends QueryDslQueryBase { + /** List of fields to search. Field wildcard patterns are allowed. Only `text` fields are supported, and they must all have the same search `analyzer`. */ fields: Field[] + /** Text to search for in the provided `fields`. + * The `combined_fields` query analyzes the provided text before performing a search. */ query: string + /** If true, match phrase queries are automatically created for multi-term synonyms. */ auto_generate_synonyms_phrase_query?: boolean + /** Boolean logic used to interpret text in the query value. */ operator?: QueryDslCombinedFieldsOperator + /** Minimum number of clauses that must match for a document to be returned. */ minimum_should_match?: MinimumShouldMatch + /** Indicates whether no documents are returned if the analyzer removes all tokens, such as when using a `stop` filter. */ zero_terms_query?: QueryDslCombinedFieldsZeroTerms } @@ -6549,6 +8271,9 @@ export interface QueryDslCommonTermsQuery extends QueryDslQueryBase { } export interface QueryDslConstantScoreQuery extends QueryDslQueryBase { + /** Filter query you wish to run. Any returned documents must match this query. + * Filter queries do not calculate relevance scores. + * To speed up performance, Elasticsearch automatically caches frequently used filter queries. */ filter: QueryDslQueryContainer } @@ -6561,70 +8286,111 @@ export interface QueryDslDateDistanceFeatureQuery extends QueryDslDistanceFeatur } export interface QueryDslDateRangeQuery extends QueryDslRangeQueryBase { + /** Date format used to convert `date` values in the query. */ format?: DateFormat + /** Coordinated Universal Time (UTC) offset or IANA time zone used to convert `date` values in the query to UTC. */ time_zone?: TimeZone } export type QueryDslDecayFunction = QueryDslUntypedDecayFunction | QueryDslDateDecayFunction | QueryDslNumericDecayFunction | QueryDslGeoDecayFunction export interface QueryDslDecayFunctionBase { + /** Determines how the distance is calculated when a field used for computing the decay contains multiple values. */ multi_value_mode?: QueryDslMultiValueMode } export interface QueryDslDecayPlacement { + /** Defines how documents are scored at the distance given at scale. */ decay?: double + /** If defined, the decay function will only compute the decay function for documents with a distance greater than the defined `offset`. */ offset?: TScale + /** Defines the distance from origin + offset at which the computed score will equal `decay` parameter. */ scale?: TScale + /** The point of origin used for calculating distance. Must be given as a number for numeric field, date for date fields and geo point for geo fields. */ origin?: TOrigin } export interface QueryDslDisMaxQuery extends QueryDslQueryBase { + /** One or more query clauses. + * Returned documents must match one or more of these queries. 
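
The bool clauses documented above compose as in this sketch; the generated `QueryDslQueryContainer` type accepts the short field-value forms, and all field names are hypothetical:

import type { QueryDslQueryContainer } from '@elastic/elasticsearch/lib/api/types'

// A bool query: scored must/should clauses plus non-scoring filter/must_not clauses.
export const articleQuery: QueryDslQueryContainer = {
  bool: {
    must: [{ match: { title: 'quick brown fox' } }],
    filter: [{ term: { status: 'published' } }], // cached, does not affect the score
    must_not: [{ exists: { field: 'retracted_at' } }],
    should: [
      { match_phrase: { title: 'brown fox' } },
      { term: { featured: true } }
    ],
    minimum_should_match: 1
  }
}
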
+ * If a document matches multiple queries, Elasticsearch uses the highest relevance score. */ queries: QueryDslQueryContainer[] + /** Floating point number between 0 and 1.0 used to increase the relevance scores of documents matching multiple query clauses. */ tie_breaker?: double } export type QueryDslDistanceFeatureQuery = QueryDslUntypedDistanceFeatureQuery | QueryDslGeoDistanceFeatureQuery | QueryDslDateDistanceFeatureQuery export interface QueryDslDistanceFeatureQueryBase extends QueryDslQueryBase { + /** Date or point of origin used to calculate distances. + * If the `field` value is a `date` or `date_nanos` field, the `origin` value must be a date. + * Date Math, such as `now-1h`, is supported. + * If the field value is a `geo_point` field, the `origin` value must be a geopoint. */ origin: TOrigin + /** Distance from the `origin` at which relevance scores receive half of the `boost` value. + * If the `field` value is a `date` or `date_nanos` field, the `pivot` value must be a time unit, such as `1h` or `10d`. If the `field` value is a `geo_point` field, the `pivot` value must be a distance unit, such as `1km` or `12m`. */ pivot: TDistance + /** Name of the field used to calculate distances. This field must meet the following criteria: + * be a `date`, `date_nanos` or `geo_point` field; + * have an `index` mapping parameter value of `true`, which is the default; + * have a `doc_values` mapping parameter value of `true`, which is the default. */ field: Field } export interface QueryDslExistsQuery extends QueryDslQueryBase { + /** Name of the field you wish to search. */ field: Field } export interface QueryDslFieldAndFormat { + /** A wildcard pattern. The request returns values for field names matching this pattern. */ field: Field + /** The format in which the values are returned. */ format?: string include_unmapped?: boolean } export interface QueryDslFieldLookup { + /** `id` of the document. */ id: Id + /** Index from which to retrieve the document. */ index?: IndexName + /** Name of the field. */ path?: Field + /** Custom routing value. */ routing?: Routing } export type QueryDslFieldValueFactorModifier = 'none' | 'log' | 'log1p' | 'log2p' | 'ln' | 'ln1p' | 'ln2p' | 'square' | 'sqrt' | 'reciprocal' export interface QueryDslFieldValueFactorScoreFunction { + /** Field to be extracted from the document. */ field: Field + /** Optional factor to multiply the field value with. */ factor?: double + /** Value used if the document doesn’t have that field. + * The modifier and factor are still applied to it as though it were read from the document. */ missing?: double + /** Modifier to apply to the field value. */ modifier?: QueryDslFieldValueFactorModifier } export type QueryDslFunctionBoostMode = 'multiply' | 'replace' | 'sum' | 'avg' | 'max' | 'min' export interface QueryDslFunctionScoreContainer { + /** Function that scores a document with an exponential decay, depending on the distance of a numeric field value of the document from an origin. */ exp?: QueryDslDecayFunction + /** Function that scores a document with a normal decay, depending on the distance of a numeric field value of the document from an origin. */ gauss?: QueryDslDecayFunction + /** Function that scores a document with a linear decay, depending on the distance of a numeric field value of the document from an origin. */ linear?: QueryDslDecayFunction + /** Function allows you to use a field from a document to influence the score.
+ * It’s similar to using the script_score function, however, it avoids the overhead of scripting. */ field_value_factor?: QueryDslFieldValueFactorScoreFunction + /** Generates scores that are uniformly distributed from 0 up to but not including 1. + * In case you want scores to be reproducible, it is possible to provide a `seed` and `field`. */ random_score?: QueryDslRandomScoreFunction + /** Enables you to wrap another query and customize the scoring of it optionally with a computation derived from other numeric field values in the doc using a script expression. */ script_score?: QueryDslScriptScoreFunction filter?: QueryDslQueryContainer weight?: double @@ -6633,26 +8399,42 @@ export interface QueryDslFunctionScoreContainer { export type QueryDslFunctionScoreMode = 'multiply' | 'sum' | 'avg' | 'first' | 'max' | 'min' export interface QueryDslFunctionScoreQuery extends QueryDslQueryBase { + /** Defines how the newly computed score is combined with the score of the query. */ boost_mode?: QueryDslFunctionBoostMode + /** One or more functions that compute a new score for each document returned by the query. */ functions?: QueryDslFunctionScoreContainer[] + /** Restricts the new score to not exceed the provided limit. */ max_boost?: double + /** Excludes documents that do not meet the provided score threshold. */ min_score?: double + /** A query that determines the documents for which a new score is computed. */ query?: QueryDslQueryContainer + /** Specifies how the computed scores are combined. */ score_mode?: QueryDslFunctionScoreMode } export interface QueryDslFuzzyQuery extends QueryDslQueryBase { + /** Maximum number of variations created. */ max_expansions?: integer + /** Number of beginning characters left unchanged when creating expansions. */ prefix_length?: integer + /** Method used to rewrite the query. */ rewrite?: MultiTermQueryRewrite + /** Indicates whether edits include transpositions of two adjacent characters (for example `ab` to `ba`). */ transpositions?: boolean + /** Maximum edit distance allowed for matching. */ fuzziness?: Fuzziness + /** Term you wish to find in the provided field. */ value: string | double | boolean } export interface QueryDslGeoBoundingBoxQueryKeys extends QueryDslQueryBase { type?: QueryDslGeoExecution + /** Set to `IGNORE_MALFORMED` to accept geo points with invalid latitude or longitude. + * Set to `COERCE` to also try to infer correct latitude or longitude. */ validation_method?: QueryDslGeoValidationMethod + /** Set to `true` to ignore an unmapped field and not match any documents for this query. + * Set to `false` to throw an exception if the field is not mapped. */ ignore_unmapped?: boolean } export type QueryDslGeoBoundingBoxQuery = QueryDslGeoBoundingBoxQueryKeys @@ -6667,9 +8449,17 @@ export interface QueryDslGeoDistanceFeatureQuery extends QueryDslDistanceFeature } export interface QueryDslGeoDistanceQueryKeys extends QueryDslQueryBase { + /** The radius of the circle centred on the specified location. + * Points which fall into this circle are considered to be matches. */ distance: Distance + /** How to compute the distance. + * Set to `plane` for a faster calculation that's inaccurate on long distances and close to the poles. */ distance_type?: GeoDistanceType + /** Set to `IGNORE_MALFORMED` to accept geo points with invalid latitude or longitude. + * Set to `COERCE` to also try to infer correct latitude or longitude.
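
A sketch that combines a `gauss` decay and a `field_value_factor` function inside a function_score query, per the options above; the field names, origin, and scale are assumptions:

import type { QueryDslFunctionScoreQuery } from '@elastic/elasticsearch/lib/api/types'

// Recency decay plus a popularity boost, summed and multiplied into the base score.
export const boostedSearch: QueryDslFunctionScoreQuery = {
  query: { match: { title: 'hiking boots' } },
  functions: [
    {
      gauss: {
        publish_date: { origin: 'now', scale: '30d', decay: 0.5 }
      },
      weight: 2
    },
    {
      field_value_factor: {
        field: 'popularity',
        factor: 1.2,
        modifier: 'log1p',
        missing: 1
      }
    }
  ],
  score_mode: 'sum',
  boost_mode: 'multiply',
  max_boost: 10
}
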
*/ validation_method?: QueryDslGeoValidationMethod + /** Set to `true` to ignore an unmapped field and not match any documents for this query. + * Set to `false` to throw an exception if the field is not mapped. */ ignore_unmapped?: boolean } export type QueryDslGeoDistanceQuery = QueryDslGeoDistanceQueryKeys @@ -6677,6 +8467,12 @@ export type QueryDslGeoDistanceQuery = QueryDslGeoDistanceQueryKeys export type QueryDslGeoExecution = 'memory' | 'indexed' +export interface QueryDslGeoGridQuery extends QueryDslQueryBase { + geogrid?: GeoTile + geohash?: GeoHash + geohex?: GeoHexCell +} + export interface QueryDslGeoPolygonPoints { points: GeoLocation[] } @@ -6690,11 +8486,15 @@ export type QueryDslGeoPolygonQuery = QueryDslGeoPolygonQueryKeys export interface QueryDslGeoShapeFieldQuery { shape?: GeoShape + /** Query using an indexed shape retrieved from the specified document and path. */ indexed_shape?: QueryDslFieldLookup + /** Spatial relation operator used to search a geo field. */ relation?: GeoShapeRelation } export interface QueryDslGeoShapeQueryKeys extends QueryDslQueryBase { + /** Set to `true` to ignore an unmapped field and not match any documents for this query. + * Set to `false` to throw an exception if the field is not mapped. */ ignore_unmapped?: boolean } export type QueryDslGeoShapeQuery = QueryDslGeoShapeQueryKeys @@ -6703,106 +8503,181 @@ export type QueryDslGeoShapeQuery = QueryDslGeoShapeQueryKeys export type QueryDslGeoValidationMethod = 'coerce' | 'ignore_malformed' | 'strict' export interface QueryDslHasChildQuery extends QueryDslQueryBase { + /** Indicates whether to ignore an unmapped `type` and not return any documents instead of an error. */ ignore_unmapped?: boolean + /** If defined, each search hit will contain inner hits. */ inner_hits?: SearchInnerHits + /** Maximum number of child documents that match the query allowed for a returned parent document. + * If the parent document exceeds this limit, it is excluded from the search results. */ max_children?: integer + /** Minimum number of child documents that match the query required to match the query for a returned parent document. + * If the parent document does not meet this limit, it is excluded from the search results. */ min_children?: integer + /** Query you wish to run on child documents of the `type` field. + * If a child document matches the search, the query returns the parent document. */ query: QueryDslQueryContainer + /** Indicates how scores for matching child documents affect the root parent document’s relevance score. */ score_mode?: QueryDslChildScoreMode + /** Name of the child relationship mapped for the `join` field. */ type: RelationName } export interface QueryDslHasParentQuery extends QueryDslQueryBase { + /** Indicates whether to ignore an unmapped `parent_type` and not return any documents instead of an error. + * You can use this parameter to query multiple indices that may not contain the `parent_type`. */ ignore_unmapped?: boolean + /** If defined, each search hit will contain inner hits. */ inner_hits?: SearchInnerHits + /** Name of the parent relationship mapped for the `join` field. */ parent_type: RelationName + /** Query you wish to run on parent documents of the `parent_type` field. + * If a parent document matches the search, the query returns its child documents. */ query: QueryDslQueryContainer + /** Indicates whether the relevance score of a matching parent document is aggregated into its child documents.
*/ score?: boolean } export interface QueryDslIdsQuery extends QueryDslQueryBase { + /** An array of document IDs. */ values?: Ids } export interface QueryDslIntervalsAllOf { + /** An array of rules to combine. All rules must produce a match in a document for the overall source to match. */ intervals: QueryDslIntervalsContainer[] + /** Maximum number of positions between the matching terms. + * Intervals produced by the rules further apart than this are not considered matches. */ max_gaps?: integer + /** If `true`, intervals produced by the rules should appear in the order in which they are specified. */ ordered?: boolean + /** Rule used to filter returned intervals. */ filter?: QueryDslIntervalsFilter } export interface QueryDslIntervalsAnyOf { + /** An array of rules to match. */ intervals: QueryDslIntervalsContainer[] + /** Rule used to filter returned intervals. */ filter?: QueryDslIntervalsFilter } export interface QueryDslIntervalsContainer { + /** Returns matches that span a combination of other rules. */ all_of?: QueryDslIntervalsAllOf + /** Returns intervals produced by any of its sub-rules. */ any_of?: QueryDslIntervalsAnyOf + /** Matches analyzed text. */ fuzzy?: QueryDslIntervalsFuzzy + /** Matches analyzed text. */ match?: QueryDslIntervalsMatch + /** Matches terms that start with a specified set of characters. */ prefix?: QueryDslIntervalsPrefix + /** Matches terms using a wildcard pattern. */ wildcard?: QueryDslIntervalsWildcard } export interface QueryDslIntervalsFilter { + /** Query used to return intervals that follow an interval from the `filter` rule. */ after?: QueryDslIntervalsContainer + /** Query used to return intervals that occur before an interval from the `filter` rule. */ before?: QueryDslIntervalsContainer + /** Query used to return intervals contained by an interval from the `filter` rule. */ contained_by?: QueryDslIntervalsContainer + /** Query used to return intervals that contain an interval from the `filter` rule. */ containing?: QueryDslIntervalsContainer + /** Query used to return intervals that are **not** contained by an interval from the `filter` rule. */ not_contained_by?: QueryDslIntervalsContainer + /** Query used to return intervals that do **not** contain an interval from the `filter` rule. */ not_containing?: QueryDslIntervalsContainer + /** Query used to return intervals that do **not** overlap with an interval from the `filter` rule. */ not_overlapping?: QueryDslIntervalsContainer + /** Query used to return intervals that overlap with an interval from the `filter` rule. */ overlapping?: QueryDslIntervalsContainer - script?: Script | string + /** Script used to return matching documents. + * This script must return a boolean value: `true` or `false`. */ + script?: Script | ScriptSource } export interface QueryDslIntervalsFuzzy { + /** Analyzer used to normalize the term. */ analyzer?: string + /** Maximum edit distance allowed for matching. */ fuzziness?: Fuzziness + /** Number of beginning characters left unchanged when creating expansions. */ prefix_length?: integer + /** The term to match. */ term: string + /** Indicates whether edits include transpositions of two adjacent characters (for example, `ab` to `ba`). */ transpositions?: boolean + /** If specified, match intervals from this field rather than the top-level field. + * The `term` is normalized using the search analyzer from this field, unless `analyzer` is specified separately. 
*/ use_field?: Field } export interface QueryDslIntervalsMatch { + /** Analyzer used to analyze terms in the query. */ analyzer?: string + /** Maximum number of positions between the matching terms. + * Terms further apart than this are not considered matches. */ max_gaps?: integer + /** If `true`, matching terms must appear in their specified order. */ ordered?: boolean + /** Text you wish to find in the provided field. */ query: string + /** If specified, match intervals from this field rather than the top-level field. + * The `term` is normalized using the search analyzer from this field, unless `analyzer` is specified separately. */ use_field?: Field + /** An optional interval filter. */ filter?: QueryDslIntervalsFilter } export interface QueryDslIntervalsPrefix { + /** Analyzer used to analyze the `prefix`. */ analyzer?: string + /** Beginning characters of terms you wish to find in the top-level field. */ prefix: string + /** If specified, match intervals from this field rather than the top-level field. + * The `prefix` is normalized using the search analyzer from this field, unless `analyzer` is specified separately. */ use_field?: Field } export interface QueryDslIntervalsQuery extends QueryDslQueryBase { + /** Returns matches that span a combination of other rules. */ all_of?: QueryDslIntervalsAllOf + /** Returns intervals produced by any of its sub-rules. */ any_of?: QueryDslIntervalsAnyOf + /** Matches terms that are similar to the provided term, within an edit distance defined by `fuzziness`. */ fuzzy?: QueryDslIntervalsFuzzy + /** Matches analyzed text. */ match?: QueryDslIntervalsMatch + /** Matches terms that start with a specified set of characters. */ prefix?: QueryDslIntervalsPrefix + /** Matches terms using a wildcard pattern. */ wildcard?: QueryDslIntervalsWildcard } export interface QueryDslIntervalsWildcard { + /** Analyzer used to analyze the `pattern`. + * Defaults to the top-level field's analyzer. */ analyzer?: string + /** Wildcard pattern used to find matching terms. */ pattern: string + /** If specified, match intervals from this field rather than the top-level field. + * The `pattern` is normalized using the search analyzer from this field, unless `analyzer` is specified separately. */ use_field?: Field } export type QueryDslLike = string | QueryDslLikeDocument export interface QueryDslLikeDocument { + /** A document not present in the index. */ doc?: any fields?: Field[] + /** ID of a document. */ _id?: Id + /** Index of a document. */ _index?: IndexName + /** Overrides the default analyzer. */ per_field_analyzer?: Record routing?: Routing version?: VersionNumber @@ -6813,14 +8688,31 @@ export interface QueryDslMatchAllQuery extends QueryDslQueryBase { } export interface QueryDslMatchBoolPrefixQuery extends QueryDslQueryBase { + /** Analyzer used to convert the text in the query value into tokens. */ analyzer?: string + /** Maximum edit distance allowed for matching. + * Can be applied to the term subqueries constructed for all terms but the final term. */ fuzziness?: Fuzziness + /** Method used to rewrite the query. + * Can be applied to the term subqueries constructed for all terms but the final term. */ fuzzy_rewrite?: MultiTermQueryRewrite + /** If `true`, edits for fuzzy matching include transpositions of two adjacent characters (for example, `ab` to `ba`). + * Can be applied to the term subqueries constructed for all terms but the final term. */ fuzzy_transpositions?: boolean + /** Maximum number of terms to which the query will expand. 
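
The intervals rules above combine as in this sketch, which requires an ordered phrase followed by a prefix within three positions; the field name and terms are hypothetical:

import type { QueryDslQueryContainer } from '@elastic/elasticsearch/lib/api/types'

// An intervals query over a hypothetical `body` field: both sub-rules must match,
// in order, with at most three positions between them.
export const intervalsSketch: QueryDslQueryContainer = {
  intervals: {
    body: {
      all_of: {
        ordered: true,
        max_gaps: 3,
        intervals: [
          { match: { query: 'connection timed out', ordered: true, max_gaps: 0 } },
          { prefix: { prefix: 'retr' } }
        ]
      }
    }
  }
}
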
+ * Can be applied to the term subqueries constructed for all terms but the final term. */ max_expansions?: integer + /** Minimum number of clauses that must match for a document to be returned. + * Applied to the constructed bool query. */ minimum_should_match?: MinimumShouldMatch + /** Boolean logic used to interpret text in the query value. + * Applied to the constructed bool query. */ operator?: QueryDslOperator + /** Number of beginning characters left unchanged for fuzzy matching. + * Can be applied to the term subqueries constructed for all terms but the final term. */ prefix_length?: integer + /** Terms you wish to find in the provided field. + * The last term is used in a prefix query. */ query: string } @@ -6828,84 +8720,149 @@ export interface QueryDslMatchNoneQuery extends QueryDslQueryBase { } export interface QueryDslMatchPhrasePrefixQuery extends QueryDslQueryBase { + /** Analyzer used to convert text in the query value into tokens. */ analyzer?: string + /** Maximum number of terms to which the last provided term of the query value will expand. */ max_expansions?: integer + /** Text you wish to find in the provided field. */ query: string + /** Maximum number of positions allowed between matching tokens. */ slop?: integer + /** Indicates whether no documents are returned if the analyzer removes all tokens, such as when using a `stop` filter. */ zero_terms_query?: QueryDslZeroTermsQuery } export interface QueryDslMatchPhraseQuery extends QueryDslQueryBase { + /** Analyzer used to convert the text in the query value into tokens. */ analyzer?: string + /** Query terms that are analyzed and turned into a phrase query. */ query: string + /** Maximum number of positions allowed between matching tokens. */ slop?: integer + /** Indicates whether no documents are returned if the `analyzer` removes all tokens, such as when using a `stop` filter. */ zero_terms_query?: QueryDslZeroTermsQuery } export interface QueryDslMatchQuery extends QueryDslQueryBase { + /** Analyzer used to convert the text in the query value into tokens. */ analyzer?: string + /** If `true`, match phrase queries are automatically created for multi-term synonyms. */ auto_generate_synonyms_phrase_query?: boolean cutoff_frequency?: double + /** Maximum edit distance allowed for matching. */ fuzziness?: Fuzziness + /** Method used to rewrite the query. */ fuzzy_rewrite?: MultiTermQueryRewrite + /** If `true`, edits for fuzzy matching include transpositions of two adjacent characters (for example, `ab` to `ba`). */ fuzzy_transpositions?: boolean + /** If `true`, format-based errors, such as providing a text query value for a numeric field, are ignored. */ lenient?: boolean + /** Maximum number of terms to which the query will expand. */ max_expansions?: integer + /** Minimum number of clauses that must match for a document to be returned. */ minimum_should_match?: MinimumShouldMatch + /** Boolean logic used to interpret text in the query value. */ operator?: QueryDslOperator + /** Number of beginning characters left unchanged for fuzzy matching. */ prefix_length?: integer + /** Text, number, boolean value or date you wish to find in the provided field. */ query: string | float | boolean + /** Indicates whether no documents are returned if the `analyzer` removes all tokens, such as when using a `stop` filter. */ zero_terms_query?: QueryDslZeroTermsQuery } export interface QueryDslMoreLikeThisQuery extends QueryDslQueryBase { + /** The analyzer that is used to analyze the free form text. 
+ * Defaults to the analyzer associated with the first field in fields. */ analyzer?: string + /** Each term in the formed query could be further boosted by their tf-idf score. + * This sets the boost factor to use when using this feature. + * Defaults to deactivated (0). */ boost_terms?: double + /** Controls whether the query should fail (throw an exception) if any of the specified fields are not of the supported types (`text` or `keyword`). */ fail_on_unsupported_field?: boolean + /** A list of fields to fetch and analyze the text from. + * Defaults to the `index.query.default_field` index setting, which has a default value of `*`. */ fields?: Field[] + /** Specifies whether the input documents should also be included in the search results returned. */ include?: boolean + /** Specifies free form text and/or a single or multiple documents for which you want to find similar documents. */ like: QueryDslLike | QueryDslLike[] + /** The maximum document frequency above which the terms are ignored from the input document. */ max_doc_freq?: integer + /** The maximum number of query terms that can be selected. */ max_query_terms?: integer + /** The maximum word length above which the terms are ignored. + * Defaults to unbounded (`0`). */ max_word_length?: integer + /** The minimum document frequency below which the terms are ignored from the input document. */ min_doc_freq?: integer + /** After the disjunctive query has been formed, this parameter controls the number of terms that must match. */ minimum_should_match?: MinimumShouldMatch + /** The minimum term frequency below which the terms are ignored from the input document. */ min_term_freq?: integer + /** The minimum word length below which the terms are ignored. */ min_word_length?: integer routing?: Routing + /** An array of stop words. + * Any word in this set is ignored. */ stop_words?: AnalysisStopWords + /** Used in combination with `like` to exclude documents that match a set of terms. */ unlike?: QueryDslLike | QueryDslLike[] version?: VersionNumber version_type?: VersionType } export interface QueryDslMultiMatchQuery extends QueryDslQueryBase { + /** Analyzer used to convert the text in the query value into tokens. */ analyzer?: string + /** If `true`, match phrase queries are automatically created for multi-term synonyms. */ auto_generate_synonyms_phrase_query?: boolean cutoff_frequency?: double + /** The fields to be queried. + * Defaults to the `index.query.default_field` index settings, which in turn defaults to `*`. */ fields?: Fields + /** Maximum edit distance allowed for matching. */ fuzziness?: Fuzziness + /** Method used to rewrite the query. */ fuzzy_rewrite?: MultiTermQueryRewrite + /** If `true`, edits for fuzzy matching include transpositions of two adjacent characters (for example, `ab` to `ba`). + * Can be applied to the term subqueries constructed for all terms but the final term. */ fuzzy_transpositions?: boolean + /** If `true`, format-based errors, such as providing a text query value for a numeric field, are ignored. */ lenient?: boolean + /** Maximum number of terms to which the query will expand. */ max_expansions?: integer + /** Minimum number of clauses that must match for a document to be returned. */ minimum_should_match?: MinimumShouldMatch + /** Boolean logic used to interpret text in the query value. */ operator?: QueryDslOperator + /** Number of beginning characters left unchanged for fuzzy matching. 
*/ prefix_length?: integer + /** Text, number, boolean value or date you wish to find in the provided field. */ query: string + /** Maximum number of positions allowed between matching tokens. */ slop?: integer + /** Determines how scores for each per-term blended query and scores across groups are combined. */ tie_breaker?: double + /** How the `multi_match` query is executed internally. */ type?: QueryDslTextQueryType + /** Indicates whether no documents are returned if the `analyzer` removes all tokens, such as when using a `stop` filter. */ zero_terms_query?: QueryDslZeroTermsQuery } export type QueryDslMultiValueMode = 'min' | 'max' | 'avg' | 'sum' export interface QueryDslNestedQuery extends QueryDslQueryBase { + /** Indicates whether to ignore an unmapped path and not return any documents instead of an error. */ ignore_unmapped?: boolean + /** If defined, each search hit will contain inner hits. */ inner_hits?: SearchInnerHits + /** Path to the nested object you wish to search. */ path: Field + /** Query you wish to run on nested objects in the path. */ query: QueryDslQueryContainer + /** How scores for matching child objects affect the root parent document’s relevance score. */ score_mode?: QueryDslChildScoreMode } @@ -6920,133 +8877,262 @@ export type QueryDslNumericDecayFunction = QueryDslNumericDecayFunctionKeys export type QueryDslOperator = 'and' | 'AND' | 'or' | 'OR' export interface QueryDslParentIdQuery extends QueryDslQueryBase { + /** ID of the parent document. */ id?: Id + /** Indicates whether to ignore an unmapped `type` and not return any documents instead of an error. */ ignore_unmapped?: boolean + /** Name of the child relationship mapped for the `join` field. */ type?: RelationName } export interface QueryDslPercolateQuery extends QueryDslQueryBase { + /** The source of the document being percolated. */ document?: any + /** An array of sources of the documents being percolated. */ documents?: any[] + /** Field that holds the indexed queries. The field must use the `percolator` mapping type. */ field: Field + /** The ID of a stored document to percolate. */ id?: Id + /** The index of a stored document to percolate. */ index?: IndexName + /** The suffix used for the `_percolator_document_slot` field when multiple `percolate` queries are specified. */ name?: string + /** Preference used to fetch document to percolate. */ preference?: string + /** Routing used to fetch document to percolate. */ routing?: Routing + /** The expected version of a stored document to percolate. */ version?: VersionNumber } export interface QueryDslPinnedDoc { + /** The unique document ID. */ _id: Id + /** The index that contains the document. */ _index?: IndexName } export interface QueryDslPinnedQuery extends QueryDslQueryBase { + /** Any choice of query used to rank documents which will be ranked below the "pinned" documents. */ organic: QueryDslQueryContainer + /** Document IDs listed in the order they are to appear in results. + * Required if `docs` is not specified. */ ids?: Id[] + /** Documents listed in the order they are to appear in results. + * Required if `ids` is not specified. */ docs?: QueryDslPinnedDoc[] } export interface QueryDslPrefixQuery extends QueryDslQueryBase { + /** Method used to rewrite the query. */ rewrite?: MultiTermQueryRewrite + /** Beginning characters of terms you wish to find in the provided field. */ value: string + /** Allows ASCII case insensitive matching of the value with the indexed field values when set to `true`.
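
A sketch of a multi_match query using the parameters documented above; the field list, boosts, and query text are hypothetical:

import type { QueryDslMultiMatchQuery } from '@elastic/elasticsearch/lib/api/types'

// best_fields scoring across three fields, with fuzziness and a tie breaker.
export const multiMatchSketch: QueryDslMultiMatchQuery = {
  query: 'quick brown fox',
  fields: ['title^3', 'body', 'tags'],
  type: 'best_fields',
  operator: 'and',
  fuzziness: 'AUTO',
  prefix_length: 1,
  tie_breaker: 0.3,
  zero_terms_query: 'none'
}
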
+ * Default is `false` which means the case sensitivity of matching depends on the underlying field’s mapping. */ case_insensitive?: boolean } export interface QueryDslQueryBase { + /** Floating point number used to decrease or increase the relevance scores of the query. + * Boost values are relative to the default value of 1.0. + * A boost value between 0 and 1.0 decreases the relevance score. + * A value greater than 1.0 increases the relevance score. */ boost?: float _name?: string } export interface QueryDslQueryContainer { + /** matches documents matching boolean combinations of other queries. */ bool?: QueryDslBoolQuery + /** Returns documents matching a `positive` query while reducing the relevance score of documents that also match a `negative` query. */ boosting?: QueryDslBoostingQuery common?: Partial> + /** The `combined_fields` query supports searching multiple text fields as if their contents had been indexed into one combined field. */ combined_fields?: QueryDslCombinedFieldsQuery + /** Wraps a filter query and returns every matching document with a relevance score equal to the `boost` parameter value. */ constant_score?: QueryDslConstantScoreQuery + /** Returns documents matching one or more wrapped queries, called query clauses or clauses. + * If a returned document matches multiple query clauses, the `dis_max` query assigns the document the highest relevance score from any matching clause, plus a tie breaking increment for any additional matching subqueries. */ dis_max?: QueryDslDisMaxQuery + /** Boosts the relevance score of documents closer to a provided origin date or point. + * For example, you can use this query to give more weight to documents closer to a certain date or location. */ distance_feature?: QueryDslDistanceFeatureQuery + /** Returns documents that contain an indexed value for a field. */ exists?: QueryDslExistsQuery + /** The `function_score` enables you to modify the score of documents that are retrieved by a query. */ function_score?: QueryDslFunctionScoreQuery | QueryDslFunctionScoreContainer[] + /** Returns documents that contain terms similar to the search term, as measured by a Levenshtein edit distance. */ fuzzy?: Partial> + /** Matches geo_point and geo_shape values that intersect a bounding box. */ geo_bounding_box?: QueryDslGeoBoundingBoxQuery + /** Matches `geo_point` and `geo_shape` values within a given distance of a geopoint. */ geo_distance?: QueryDslGeoDistanceQuery + /** Matches `geo_point` and `geo_shape` values that intersect a grid cell from a GeoGrid aggregation. */ + geo_grid?: Partial> geo_polygon?: QueryDslGeoPolygonQuery + /** Filter documents indexed using either the `geo_shape` or the `geo_point` type. */ geo_shape?: QueryDslGeoShapeQuery + /** Returns parent documents whose joined child documents match a provided query. */ has_child?: QueryDslHasChildQuery + /** Returns child documents whose joined parent document matches a provided query. */ has_parent?: QueryDslHasParentQuery + /** Returns documents based on their IDs. + * This query uses document IDs stored in the `_id` field. */ ids?: QueryDslIdsQuery + /** Returns documents based on the order and proximity of matching terms. */ intervals?: Partial> + /** Finds the k nearest vectors to a query vector, as measured by a similarity + * metric. knn query finds nearest vectors through approximate search on indexed + * dense_vectors. */ knn?: KnnQuery + /** Returns documents that match a provided text, number, date or boolean value. 
+ * The provided text is analyzed before matching. */ match?: Partial> + /** Matches all documents, giving them all a `_score` of 1.0. */ match_all?: QueryDslMatchAllQuery + /** Analyzes its input and constructs a `bool` query from the terms. + * Each term except the last is used in a `term` query. + * The last term is used in a prefix query. */ match_bool_prefix?: Partial> + /** Matches no documents. */ match_none?: QueryDslMatchNoneQuery + /** Analyzes the text and creates a phrase query out of the analyzed text. */ match_phrase?: Partial> + /** Returns documents that contain the words of a provided text, in the same order as provided. + * The last term of the provided text is treated as a prefix, matching any words that begin with that term. */ match_phrase_prefix?: Partial> + /** Returns documents that are "like" a given set of documents. */ more_like_this?: QueryDslMoreLikeThisQuery + /** Enables you to search for a provided text, number, date or boolean value across multiple fields. + * The provided text is analyzed before matching. */ multi_match?: QueryDslMultiMatchQuery + /** Wraps another query to search nested fields. + * If an object matches the search, the nested query returns the root parent document. */ nested?: QueryDslNestedQuery + /** Returns child documents joined to a specific parent document. */ parent_id?: QueryDslParentIdQuery + /** Matches queries stored in an index. */ percolate?: QueryDslPercolateQuery + /** Promotes selected documents to rank higher than those matching a given query. */ pinned?: QueryDslPinnedQuery + /** Returns documents that contain a specific prefix in a provided field. */ prefix?: Partial> + /** Returns documents based on a provided query string, using a parser with a strict syntax. */ query_string?: QueryDslQueryStringQuery + /** Returns documents that contain terms within a provided range. */ range?: Partial> + /** Boosts the relevance score of documents based on the numeric value of a `rank_feature` or `rank_features` field. */ rank_feature?: QueryDslRankFeatureQuery + /** Returns documents that contain terms matching a regular expression. */ regexp?: Partial> rule?: QueryDslRuleQuery + /** Filters documents based on a provided script. + * The script query is typically used in a filter context. */ script?: QueryDslScriptQuery + /** Uses a script to provide a custom score for returned documents. */ script_score?: QueryDslScriptScoreQuery + /** A semantic query to semantic_text field types */ semantic?: QueryDslSemanticQuery + /** Queries documents that contain fields indexed using the `shape` type. */ shape?: QueryDslShapeQuery + /** Returns documents based on a provided query string, using a parser with a limited but fault-tolerant syntax. */ simple_query_string?: QueryDslSimpleQueryStringQuery + /** Returns matches which enclose another span query. */ span_containing?: QueryDslSpanContainingQuery + /** Wrapper to allow span queries to participate in composite single-field span queries by _lying_ about their search field. */ span_field_masking?: QueryDslSpanFieldMaskingQuery + /** Matches spans near the beginning of a field. */ span_first?: QueryDslSpanFirstQuery + /** Allows you to wrap a multi term query (one of `wildcard`, `fuzzy`, `prefix`, `range`, or `regexp` query) as a `span` query, so it can be nested. */ span_multi?: QueryDslSpanMultiTermQuery + /** Matches spans which are near one another. + * You can specify `slop`, the maximum number of intervening unmatched positions, as well as whether matches are required to be in-order. 
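For orientation, a minimal sketch of composing a `QueryDslQueryContainer` (one clause per container, here combined under `bool`). It assumes the `estypes` re-export from the client package; index, fields, and values are placeholders.

```typescript
import { Client, estypes } from '@elastic/elasticsearch'

const client = new Client({ node: 'https://localhost:9200/' }) // placeholder

// A QueryDslQueryContainer holds exactly one of the clauses documented above;
// a `bool` query is the usual way to combine several of them.
const query: estypes.QueryDslQueryContainer = {
  bool: {
    must: [{ match: { title: 'observability' } }],
    filter: [{ term: { 'user.id': 'kimchy' } }], // placeholder field/value
    must_not: [{ exists: { field: 'archived_at' } }]
  }
}

const res = await client.search({ index: 'articles', query })
console.log(res.hits.total)
```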
*/ span_near?: QueryDslSpanNearQuery + /** Removes matches which overlap with another span query or which are within x tokens before (controlled by the parameter `pre`) or y tokens after (controlled by the parameter `post`) another span query. */ span_not?: QueryDslSpanNotQuery + /** Matches the union of its span clauses. */ span_or?: QueryDslSpanOrQuery + /** Matches spans containing a term. */ span_term?: Partial> + /** Returns matches which are enclosed inside another span query. */ span_within?: QueryDslSpanWithinQuery + /** Using input query vectors or a natural language processing model to convert a query into a list of token-weight pairs, queries against a sparse vector field. */ sparse_vector?: QueryDslSparseVectorQuery + /** Returns documents that contain an exact term in a provided field. + * To return a document, the query term must exactly match the queried field's value, including whitespace and capitalization. */ term?: Partial> + /** Returns documents that contain one or more exact terms in a provided field. + * To return a document, one or more terms must exactly match a field value, including whitespace and capitalization. */ terms?: QueryDslTermsQuery + /** Returns documents that contain a minimum number of exact terms in a provided field. + * To return a document, a required number of terms must exactly match the field values, including whitespace and capitalization. */ terms_set?: Partial> + /** Uses a natural language processing model to convert the query text into a list of token-weight pairs which are then used in a query against a sparse vector or rank features field. */ text_expansion?: Partial> + /** Supports returning text_expansion query results by sending in precomputed tokens with the query. */ weighted_tokens?: Partial> + /** Returns documents that contain terms matching a wildcard pattern. */ wildcard?: Partial> + /** A query that accepts any other query as base64 encoded string. */ wrapper?: QueryDslWrapperQuery type?: QueryDslTypeQuery } export interface QueryDslQueryStringQuery extends QueryDslQueryBase { + /** If `true`, the wildcard characters `*` and `?` are allowed as the first character of the query string. */ allow_leading_wildcard?: boolean + /** Analyzer used to convert text in the query string into tokens. */ analyzer?: string + /** If `true`, the query attempts to analyze wildcard terms in the query string. */ analyze_wildcard?: boolean + /** If `true`, match phrase queries are automatically created for multi-term synonyms. */ auto_generate_synonyms_phrase_query?: boolean + /** Default field to search if no field is provided in the query string. + * Supports wildcards (`*`). + * Defaults to the `index.query.default_field` index setting, which has a default value of `*`. */ default_field?: Field + /** Default boolean logic used to interpret text in the query string if no operators are specified. */ default_operator?: QueryDslOperator + /** If `true`, enable position increments in queries constructed from a `query_string` search. */ enable_position_increments?: boolean escape?: boolean + /** Array of fields to search. Supports wildcards (`*`). */ fields?: Field[] + /** Maximum edit distance allowed for fuzzy matching. */ fuzziness?: Fuzziness + /** Maximum number of terms to which the query expands for fuzzy matching. */ fuzzy_max_expansions?: integer + /** Number of beginning characters left unchanged for fuzzy matching. */ fuzzy_prefix_length?: integer + /** Method used to rewrite the query. 
*/ fuzzy_rewrite?: MultiTermQueryRewrite + /** If `true`, edits for fuzzy matching include transpositions of two adjacent characters (for example, `ab` to `ba`). */ fuzzy_transpositions?: boolean + /** If `true`, format-based errors, such as providing a text value for a numeric field, are ignored. */ lenient?: boolean + /** Maximum number of automaton states required for the query. */ max_determinized_states?: integer + /** Minimum number of clauses that must match for a document to be returned. */ minimum_should_match?: MinimumShouldMatch + /** Maximum number of positions allowed between matching tokens for phrases. */ phrase_slop?: double + /** Query string you wish to parse and use for search. */ query: string + /** Analyzer used to convert quoted text in the query string into tokens. + * For quoted text, this parameter overrides the analyzer specified in the `analyzer` parameter. */ quote_analyzer?: string + /** Suffix appended to quoted text in the query string. + * You can use this suffix to use a different analysis method for exact matches. */ quote_field_suffix?: string + /** Method used to rewrite the query. */ rewrite?: MultiTermQueryRewrite + /** How to combine the queries generated from the individual search terms in the resulting `dis_max` query. */ tie_breaker?: double + /** Coordinated Universal Time (UTC) offset or IANA time zone used to convert date values in the query string to UTC. */ time_zone?: TimeZone + /** Determines how the query matches and scores documents. */ type?: QueryDslTextQueryType } @@ -7058,10 +9144,15 @@ export interface QueryDslRandomScoreFunction { export type QueryDslRangeQuery = QueryDslUntypedRangeQuery | QueryDslDateRangeQuery | QueryDslNumberRangeQuery | QueryDslTermRangeQuery export interface QueryDslRangeQueryBase extends QueryDslQueryBase { + /** Indicates how the range query matches values for `range` fields. */ relation?: QueryDslRangeRelation + /** Greater than. */ gt?: T + /** Greater than or equal to. */ gte?: T + /** Less than. */ lt?: T + /** Less than or equal to. */ lte?: T from?: T | null to?: T | null @@ -7076,31 +9167,46 @@ export interface QueryDslRankFeatureFunctionLinear { } export interface QueryDslRankFeatureFunctionLogarithm { + /** Configurable scaling factor. */ scaling_factor: float } export interface QueryDslRankFeatureFunctionSaturation { + /** Configurable pivot value so that the result will be less than 0.5. */ pivot?: float } export interface QueryDslRankFeatureFunctionSigmoid { + /** Configurable pivot value so that the result will be less than 0.5. */ pivot: float + /** Configurable Exponent. */ exponent: float } export interface QueryDslRankFeatureQuery extends QueryDslQueryBase { + /** `rank_feature` or `rank_features` field used to boost relevance scores. */ field: Field + /** Saturation function used to boost relevance scores based on the value of the rank feature `field`. */ saturation?: QueryDslRankFeatureFunctionSaturation + /** Logarithmic function used to boost relevance scores based on the value of the rank feature `field`. */ log?: QueryDslRankFeatureFunctionLogarithm + /** Linear function used to boost relevance scores based on the value of the rank feature `field`. */ linear?: QueryDslRankFeatureFunctionLinear + /** Sigmoid function used to boost relevance scores based on the value of the rank feature `field`. 
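A small illustrative sketch of the range bounds (`gte`, `lt`, `time_zone`) documented above, using date math; the index pattern and field name are assumptions.

```typescript
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'https://localhost:9200/' }) // placeholder

// Date range query: documents from the last seven whole days,
// interpreting the bounds in the +01:00 time zone.
const recent = await client.search({
  index: 'logs-*', // placeholder index pattern
  query: {
    range: {
      timestamp: { // placeholder date field
        gte: 'now-7d/d',
        lt: 'now/d',
        time_zone: '+01:00'
      }
    }
  }
})
console.log(recent.hits.hits.length)
```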
*/ sigmoid?: QueryDslRankFeatureFunctionSigmoid } export interface QueryDslRegexpQuery extends QueryDslQueryBase { + /** Allows case insensitive matching of the regular expression value with the indexed field values when set to `true`. + * When `false`, case sensitivity of matching depends on the underlying field’s mapping. */ case_insensitive?: boolean + /** Enables optional operators for the regular expression. */ flags?: string + /** Maximum number of automaton states required for the query. */ max_determinized_states?: integer + /** Method used to rewrite the query. */ rewrite?: MultiTermQueryRewrite + /** Regular expression for terms you wish to find in the provided field. */ value: string } @@ -7111,31 +9217,44 @@ export interface QueryDslRuleQuery extends QueryDslQueryBase { } export interface QueryDslScriptQuery extends QueryDslQueryBase { - script: Script | string + /** Contains a script to run as a query. + * This script must return a boolean value, `true` or `false`. */ + script: Script | ScriptSource } export interface QueryDslScriptScoreFunction { - script: Script | string + /** A script that computes a score. */ + script: Script | ScriptSource } export interface QueryDslScriptScoreQuery extends QueryDslQueryBase { + /** Documents with a score lower than this floating point number are excluded from the search results. */ min_score?: float + /** Query used to return documents. */ query: QueryDslQueryContainer - script: Script | string + /** Script used to compute the score of documents returned by the query. + * Important: final relevance scores from the `script_score` query cannot be negative. */ + script: Script | ScriptSource } export interface QueryDslSemanticQuery extends QueryDslQueryBase { + /** The field to query, which must be a semantic_text field type */ field: string + /** The query text */ query: string } export interface QueryDslShapeFieldQuery { + /** Queries using a pre-indexed shape. */ indexed_shape?: QueryDslFieldLookup + /** Spatial relation between the query shape and the document shape. */ relation?: GeoShapeRelation + /** Queries using an inline shape definition in GeoJSON or Well Known Text (WKT) format. */ shape?: GeoShape } export interface QueryDslShapeQueryKeys extends QueryDslQueryBase { + /** When set to `true` the query ignores an unmapped field and will not match any documents. */ ignore_unmapped?: boolean } export type QueryDslShapeQuery = QueryDslShapeQueryKeys @@ -7146,23 +9265,43 @@ export type QueryDslSimpleQueryStringFlag = 'NONE' | 'AND' | 'NOT' | 'OR' | 'PRE export type QueryDslSimpleQueryStringFlags = SpecUtilsPipeSeparatedFlags export interface QueryDslSimpleQueryStringQuery extends QueryDslQueryBase { + /** Analyzer used to convert text in the query string into tokens. */ analyzer?: string + /** If `true`, the query attempts to analyze wildcard terms in the query string. */ analyze_wildcard?: boolean + /** If `true`, the parser creates a match_phrase query for each multi-position token. */ auto_generate_synonyms_phrase_query?: boolean + /** Default boolean logic used to interpret text in the query string if no operators are specified. */ default_operator?: QueryDslOperator + /** Array of fields you wish to search. + * Accepts wildcard expressions. + * You also can boost relevance scores for matches to particular fields using a caret (`^`) notation. + * Defaults to the `index.query.default_field index` setting, which has a default value of `*`. */ fields?: Field[] + /** List of enabled operators for the simple query string syntax. 
*/ flags?: QueryDslSimpleQueryStringFlags + /** Maximum number of terms to which the query expands for fuzzy matching. */ fuzzy_max_expansions?: integer + /** Number of beginning characters left unchanged for fuzzy matching. */ fuzzy_prefix_length?: integer + /** If `true`, edits for fuzzy matching include transpositions of two adjacent characters (for example, `ab` to `ba`). */ fuzzy_transpositions?: boolean + /** If `true`, format-based errors, such as providing a text value for a numeric field, are ignored. */ lenient?: boolean + /** Minimum number of clauses that must match for a document to be returned. */ minimum_should_match?: MinimumShouldMatch + /** Query string in the simple query string syntax you wish to parse and use for search. */ query: string + /** Suffix appended to quoted text in the query string. */ quote_field_suffix?: string } export interface QueryDslSpanContainingQuery extends QueryDslQueryBase { + /** Can be any span query. + * Matching spans from `big` that contain matches from `little` are returned. */ big: QueryDslSpanQuery + /** Can be any span query. + * Matching spans from `big` that contain matches from `little` are returned. */ little: QueryDslSpanQuery } @@ -7172,68 +9311,118 @@ export interface QueryDslSpanFieldMaskingQuery extends QueryDslQueryBase { } export interface QueryDslSpanFirstQuery extends QueryDslQueryBase { + /** Controls the maximum end position permitted in a match. */ end: integer + /** Can be any other span type query. */ match: QueryDslSpanQuery } export type QueryDslSpanGapQuery = Partial> export interface QueryDslSpanMultiTermQuery extends QueryDslQueryBase { + /** Should be a multi term query (one of `wildcard`, `fuzzy`, `prefix`, `range`, or `regexp` query). */ match: QueryDslQueryContainer } export interface QueryDslSpanNearQuery extends QueryDslQueryBase { + /** Array of one or more other span type queries. */ clauses: QueryDslSpanQuery[] + /** Controls whether matches are required to be in-order. */ in_order?: boolean + /** Controls the maximum number of intervening unmatched positions permitted. */ slop?: integer } export interface QueryDslSpanNotQuery extends QueryDslQueryBase { + /** The number of tokens from within the include span that can’t have overlap with the exclude span. + * Equivalent to setting both `pre` and `post`. */ dist?: integer + /** Span query whose matches must not overlap those returned. */ exclude: QueryDslSpanQuery + /** Span query whose matches are filtered. */ include: QueryDslSpanQuery + /** The number of tokens after the include span that can’t have overlap with the exclude span. */ post?: integer + /** The number of tokens before the include span that can’t have overlap with the exclude span. */ pre?: integer } export interface QueryDslSpanOrQuery extends QueryDslQueryBase { + /** Array of one or more other span type queries. */ clauses: QueryDslSpanQuery[] } export interface QueryDslSpanQuery { + /** Accepts a list of span queries, but only returns those spans which also match a second span query. */ span_containing?: QueryDslSpanContainingQuery + /** Allows queries like `span_near` or `span_or` across different fields. */ span_field_masking?: QueryDslSpanFieldMaskingQuery + /** Accepts another span query whose matches must appear within the first N positions of the field. */ span_first?: QueryDslSpanFirstQuery span_gap?: QueryDslSpanGapQuery + /** Wraps a `term`, `range`, `prefix`, `wildcard`, `regexp`, or `fuzzy` query. 
*/ span_multi?: QueryDslSpanMultiTermQuery + /** Accepts multiple span queries whose matches must be within the specified distance of each other, and possibly in the same order. */ span_near?: QueryDslSpanNearQuery + /** Wraps another span query, and excludes any documents which match that query. */ span_not?: QueryDslSpanNotQuery + /** Combines multiple span queries and returns documents which match any of the specified queries. */ span_or?: QueryDslSpanOrQuery + /** The equivalent of the `term` query but for use with other span queries. */ span_term?: Partial> + /** The result from a single span query is returned as long as its span falls within the spans returned by a list of other span queries. */ span_within?: QueryDslSpanWithinQuery } export interface QueryDslSpanTermQuery extends QueryDslQueryBase { value: FieldValue + /** @alias value */ term: FieldValue } export interface QueryDslSpanWithinQuery extends QueryDslQueryBase { + /** Can be any span query. + * Matching spans from `little` that are enclosed within `big` are returned. */ big: QueryDslSpanQuery + /** Can be any span query. + * Matching spans from `little` that are enclosed within `big` are returned. */ little: QueryDslSpanQuery } export interface QueryDslSparseVectorQuery extends QueryDslQueryBase { + /** The name of the field that contains the token-weight pairs to be searched against. + * This field must be a mapped sparse_vector field. */ field: Field + /** Dictionary of precomputed sparse vectors and their associated weights. + * Only one of inference_id or query_vector may be supplied in a request. */ query_vector?: Record + /** The inference ID to use to convert the query text into token-weight pairs. + * It must be the same inference ID that was used to create the tokens from the input text. + * Only one of inference_id and query_vector is allowed. + * If inference_id is specified, query must also be specified. + * Only one of inference_id or query_vector may be supplied in a request. */ inference_id?: Id + /** The query text you want to use for search. + * If inference_id is specified, query must also be specified. */ query?: string + /** Whether to perform pruning, omitting the non-significant tokens from the query to improve query performance. + * If prune is true but the pruning_config is not specified, pruning will occur but default values will be used. + * Default: false + * @experimental */ prune?: boolean + /** Optional pruning configuration. + * If enabled, this will omit non-significant tokens from the query in order to improve query performance. + * This is only used if prune is set to true. + * If prune is set to true but pruning_config is not specified, default values will be used. + * @experimental */ pruning_config?: QueryDslTokenPruningConfig } export interface QueryDslTermQuery extends QueryDslQueryBase { + /** Term you wish to find in the provided field. */ value: FieldValue + /** Allows ASCII case insensitive matching of the value with the indexed field values when set to `true`. + * When `false`, the case sensitivity of matching depends on the underlying field’s mapping. */ case_insensitive?: boolean } @@ -7255,23 +9444,34 @@ export type QueryDslTermsQuery = QueryDslTermsQueryKeys export type QueryDslTermsQueryField = FieldValue[] | QueryDslTermsLookup export interface QueryDslTermsSetQuery extends QueryDslQueryBase { + /** Specification describing number of matching terms required to return a document.
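A hedged sketch of the `sparse_vector` query fields documented above, using an inference endpoint rather than precomputed `query_vector` weights; the index, field name, and endpoint ID are assumptions.

```typescript
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'https://localhost:9200/' }) // placeholder

// sparse_vector by inference endpoint: only one of `inference_id`+`query`
// or `query_vector` is supplied; `prune` is experimental (see doc comment above).
const res = await client.search({
  index: 'docs', // placeholder index
  query: {
    sparse_vector: {
      field: 'content_embedding',        // assumed mapped sparse_vector field
      inference_id: 'my-elser-endpoint', // assumed inference endpoint ID
      query: 'how do I restore a snapshot?',
      prune: true
    }
  }
})
console.log(res.hits.hits)
```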
*/ minimum_should_match?: MinimumShouldMatch + /** Numeric field containing the number of matching terms required to return a document. */ minimum_should_match_field?: Field - minimum_should_match_script?: Script | string + /** Custom script containing the number of matching terms required to return a document. */ + minimum_should_match_script?: Script | ScriptSource + /** Array of terms you wish to find in the provided field. */ terms: FieldValue[] } export interface QueryDslTextExpansionQuery extends QueryDslQueryBase { + /** The text expansion NLP model to use */ model_id: string + /** The query text */ model_text: string + /** Token pruning configurations + * @experimental */ pruning_config?: QueryDslTokenPruningConfig } export type QueryDslTextQueryType = 'best_fields' | 'most_fields' | 'cross_fields' | 'phrase' | 'phrase_prefix' | 'bool_prefix' export interface QueryDslTokenPruningConfig { + /** Tokens whose frequency is more than this threshold times the average frequency of all tokens in the specified field are considered outliers and pruned. */ tokens_freq_ratio_threshold?: integer + /** Tokens whose weight is less than this threshold are considered nonsignificant and pruned. */ tokens_weight_threshold?: float + /** Whether to only score pruned tokens, vs only scoring kept tokens. */ only_score_pruned_tokens?: boolean } @@ -7288,38 +9488,53 @@ export interface QueryDslUntypedDistanceFeatureQuery extends QueryDslDistanceFea } export interface QueryDslUntypedRangeQuery extends QueryDslRangeQueryBase { + /** Date format used to convert `date` values in the query. */ format?: DateFormat + /** Coordinated Universal Time (UTC) offset or IANA time zone used to convert `date` values in the query to UTC. */ time_zone?: TimeZone } export interface QueryDslWeightedTokensQuery extends QueryDslQueryBase { + /** The tokens representing this query */ tokens: Record + /** Token pruning configurations */ pruning_config?: QueryDslTokenPruningConfig } export interface QueryDslWildcardQuery extends QueryDslQueryBase { + /** Allows case insensitive matching of the pattern with the indexed field values when set to true. Default is false which means the case sensitivity of matching depends on the underlying field’s mapping. */ case_insensitive?: boolean + /** Method used to rewrite the query. */ rewrite?: MultiTermQueryRewrite + /** Wildcard pattern for terms you wish to find in the provided field. Required when wildcard is not set. */ value?: string + /** Wildcard pattern for terms you wish to find in the provided field. Required when value is not set. */ wildcard?: string } export interface QueryDslWrapperQuery extends QueryDslQueryBase { + /** A base64 encoded query. + * The binary data format can be any of JSON, YAML, CBOR or SMILE encodings */ query: string } export type QueryDslZeroTermsQuery = 'all' | 'none' export interface AsyncSearchAsyncSearch> { + /** Partial aggregation results, coming from the shards that have already completed running the query. */ aggregations?: TAggregations _clusters?: ClusterStatistics fields?: Record hits: SearchHitsMetadata max_score?: double + /** Indicates how many reductions of the results have been performed. + * If this number increases compared to the last retrieved results for a get async search request, you can expect additional results included in the search response. */ num_reduce_phases?: long profile?: SearchProfile pit_id?: Id _scroll_id?: ScrollId + /** Indicates how many shards have run the query.
+ * Note that in order for shard results to be included in the search response, they need to be reduced first. */ _shards: ShardStatistics suggest?: Record[]> terminated_early?: boolean @@ -7333,18 +9548,27 @@ export interface AsyncSearchAsyncSearchDocumentResponseBase info + * > If the search failed after some shards returned their results or the node that is coordinating the async search dies, results may be partial even though `is_running` is `false`. */ is_running: boolean + /** Indicates when the async search will expire. */ expiration_time?: DateTime expiration_time_in_millis: EpochTime start_time?: DateTime start_time_in_millis: EpochTime + /** Indicates when the async search completed. + * It is present only when the search has completed. */ completion_time?: DateTime completion_time_in_millis?: EpochTime } export interface AsyncSearchDeleteRequest extends RequestBase { -/** A unique identifier for the async search. */ + /** A unique identifier for the async search. */ id: Id /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { id?: never } @@ -7355,13 +9579,19 @@ export interface AsyncSearchDeleteRequest extends RequestBase { export type AsyncSearchDeleteResponse = AcknowledgedResponseBase export interface AsyncSearchGetRequest extends RequestBase { -/** A unique identifier for the async search. */ + /** A unique identifier for the async search. */ id: Id - /** The length of time that the async search should be available in the cluster. When not specified, the `keep_alive` set with the corresponding submit async request will be used. Otherwise, it is possible to override the value and extend the validity of the request. When this period expires, the search, if still running, is cancelled. If the search is completed, its saved results are deleted. */ + /** The length of time that the async search should be available in the cluster. + * When not specified, the `keep_alive` set with the corresponding submit async request will be used. + * Otherwise, it is possible to override the value and extend the validity of the request. + * When this period expires, the search, if still running, is cancelled. + * If the search is completed, its saved results are deleted. */ keep_alive?: Duration /** Specify whether aggregation and suggester names should be prefixed by their respective types in the response */ typed_keys?: boolean - /** Specifies to wait for the search to be completed up until the provided timeout. Final results will be returned if available before the timeout expires, otherwise the currently available results will be returned once the timeout expires. By default no timeout is set meaning that the currently available results will be returned without any additional wait. */ + /** Specifies to wait for the search to be completed up until the provided timeout. + * Final results will be returned if available before the timeout expires, otherwise the currently available results will be returned once the timeout expires. + * By default no timeout is set meaning that the currently available results will be returned without any additional wait. */ wait_for_completion_timeout?: Duration /** All values in `body` will be added to the request body. 
*/ body?: string | { [key: string]: any } & { id?: never, keep_alive?: never, typed_keys?: never, wait_for_completion_timeout?: never } @@ -7372,9 +9602,10 @@ export interface AsyncSearchGetRequest extends RequestBase { export type AsyncSearchGetResponse> = AsyncSearchAsyncSearchDocumentResponseBase export interface AsyncSearchStatusRequest extends RequestBase { -/** A unique identifier for the async search. */ + /** A unique identifier for the async search. */ id: Id - /** The length of time that the async search needs to be available. Ongoing async searches and any saved search results are deleted after this period. */ + /** The length of time that the async search needs to be available. + * Ongoing async searches and any saved search results are deleted after this period. */ keep_alive?: Duration /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { id?: never, keep_alive?: never } @@ -7385,17 +9616,25 @@ export interface AsyncSearchStatusRequest extends RequestBase { export type AsyncSearchStatusResponse = AsyncSearchStatusStatusResponseBase export interface AsyncSearchStatusStatusResponseBase extends AsyncSearchAsyncSearchResponseBase { + /** The number of shards that have run the query so far. */ _shards: ShardStatistics + /** Metadata about clusters involved in the cross-cluster search. + * It is not shown for local-only searches. */ _clusters?: ClusterStatistics + /** If the async search completed, this field shows the status code of the search. + * For example, `200` indicates that the async search was successfully completed. + * `503` indicates that the async search was completed with an error. */ completion_status?: integer } export interface AsyncSearchSubmitRequest extends RequestBase { -/** A comma-separated list of index names to search; use `_all` or empty string to perform the operation on all indices */ + /** A comma-separated list of index names to search; use `_all` or empty string to perform the operation on all indices */ index?: Indices - /** Blocks and waits until the search is completed up to a certain timeout. When the async search completes within the timeout, the response won’t include the ID as the results are not stored in the cluster. */ + /** Blocks and waits until the search is completed up to a certain timeout. + * When the async search completes within the timeout, the response won’t include the ID as the results are not stored in the cluster. */ wait_for_completion_timeout?: Duration - /** Specifies how long the async search needs to be available. Ongoing async searches and any saved search results are deleted after this period. */ + /** Specifies how long the async search needs to be available. + * Ongoing async searches and any saved search results are deleted after this period. */ keep_alive?: Duration /** If `true`, results are stored for later retrieval when the search completes within the `wait_for_completion_timeout`. */ keep_on_completion?: boolean @@ -7407,7 +9646,8 @@ export interface AsyncSearchSubmitRequest extends RequestBase { analyzer?: string /** Specify whether wildcard and prefix queries should be analyzed (default: false) */ analyze_wildcard?: boolean - /** Affects how often partial results become available, which happens whenever shard results are reduced. A partial reduction is performed every time the coordinating node has received a certain number of new shard responses (5 by default). 
*/ + /** Affects how often partial results become available, which happens whenever shard results are reduced. + * A partial reduction is performed every time the coordinating node has received a certain number of new shard responses (5 by default). */ batched_reduce_size?: long /** The default value is the only supported value. */ ccs_minimize_roundtrips?: boolean @@ -7424,7 +9664,7 @@ export interface AsyncSearchSubmitRequest extends RequestBase { /** Specify whether format-based query failures (such as providing text to a numeric field) should be ignored */ lenient?: boolean /** The number of concurrent shard requests per node this search executes concurrently. This value should be used to limit the impact of the search on the cluster in order to limit the number of concurrent shard requests */ - max_concurrent_shard_requests?: long + max_concurrent_shard_requests?: integer /** Specify the node or shard the operation should be performed on (default: random) */ preference?: string /** Specify if request cache should be used for this request or not, defaults to true */ @@ -7459,18 +9699,25 @@ export interface AsyncSearchSubmitRequest extends RequestBase { explain?: boolean /** Configuration of search extensions defined by Elasticsearch plugins. */ ext?: Record - /** Starting document offset. By default, you cannot page through more than 10,000 hits using the from and size parameters. To page through more hits, use the search_after parameter. */ + /** Starting document offset. By default, you cannot page through more than 10,000 + * hits using the from and size parameters. To page through more hits, use the + * search_after parameter. */ from?: integer highlight?: SearchHighlight - /** Number of hits matching the query to count accurately. If true, the exact number of hits is returned at the cost of some performance. If false, the response does not include the total number of hits matching the query. Defaults to 10,000 hits. */ + /** Number of hits matching the query to count accurately. If true, the exact + * number of hits is returned at the cost of some performance. If false, the + * response does not include the total number of hits matching the query. + * Defaults to 10,000 hits. */ track_total_hits?: SearchTrackHits /** Boosts the _score of documents from specified indices. */ - indices_boost?: Record[] - /** Array of wildcard (*) patterns. The request returns doc values for field names matching these patterns in the hits.fields property of the response. */ + indices_boost?: Partial>[] + /** Array of wildcard (*) patterns. The request returns doc values for field + * names matching these patterns in the hits.fields property of the response. */ docvalue_fields?: (QueryDslFieldAndFormat | Field)[] /** Defines the approximate kNN search to run. */ knn?: KnnSearch | KnnSearch[] - /** Minimum _score for matching documents. Documents with a lower _score are not included in the search results. */ + /** Minimum _score for matching documents. Documents with a lower _score are + * not included in search results and results collected by aggregations. */ min_score?: double post_filter?: QueryDslQueryContainer profile?: boolean @@ -7480,32 +9727,48 @@ export interface AsyncSearchSubmitRequest extends RequestBase { /** Retrieve a script evaluation (based on different fields) for each hit. */ script_fields?: Record search_after?: SortResults - /** The number of hits to return. By default, you cannot page through more than 10,000 hits using the from and size parameters. 
To page through more hits, use the search_after parameter. */ + /** The number of hits to return. By default, you cannot page through more + * than 10,000 hits using the from and size parameters. To page through more + * hits, use the search_after parameter. */ size?: integer slice?: SlicedScroll sort?: Sort - /** Indicates which source fields are returned for matching documents. These fields are returned in the hits._source property of the search response. */ + /** Indicates which source fields are returned for matching documents. These + * fields are returned in the hits._source property of the search response. */ _source?: SearchSourceConfig - /** Array of wildcard (*) patterns. The request returns values for field names matching these patterns in the hits.fields property of the response. */ + /** Array of wildcard (*) patterns. The request returns values for field names + * matching these patterns in the hits.fields property of the response. */ fields?: (QueryDslFieldAndFormat | Field)[] suggest?: SearchSuggester - /** Maximum number of documents to collect for each shard. If a query reaches this limit, Elasticsearch terminates the query early. Elasticsearch collects documents before sorting. Defaults to 0, which does not terminate query execution early. */ + /** Maximum number of documents to collect for each shard. If a query reaches this + * limit, Elasticsearch terminates the query early. Elasticsearch collects documents + * before sorting. Defaults to 0, which does not terminate query execution early. */ terminate_after?: long - /** Specifies the period of time to wait for a response from each shard. If no response is received before the timeout expires, the request fails and returns an error. Defaults to no timeout. */ + /** Specifies the period of time to wait for a response from each shard. If no response + * is received before the timeout expires, the request fails and returns an error. + * Defaults to no timeout. */ timeout?: string /** If true, calculate and return document scores, even if the scores are not used for sorting. */ track_scores?: boolean /** If true, returns document version as part of a hit. */ version?: boolean - /** If true, returns sequence number and primary term of the last modification of each hit. See Optimistic concurrency control. */ + /** If true, returns sequence number and primary term of the last modification + * of each hit. See Optimistic concurrency control. */ seq_no_primary_term?: boolean - /** List of stored fields to return as part of a hit. If no fields are specified, no stored fields are included in the response. If this field is specified, the _source parameter defaults to false. You can pass _source: true to return both source fields and stored fields in the search response. */ + /** List of stored fields to return as part of a hit. If no fields are specified, + * no stored fields are included in the response. If this field is specified, the _source + * parameter defaults to false. You can pass _source: true to return both source fields + * and stored fields in the search response. */ stored_fields?: Fields - /** Limits the search to a point in time (PIT). If you provide a PIT, you cannot specify an in the request path. */ + /** Limits the search to a point in time (PIT). If you provide a PIT, you + * cannot specify an in the request path. */ pit?: SearchPointInTimeReference - /** Defines one or more runtime fields in the search request. These fields take precedence over mapped fields with the same name. 
*/ + /** Defines one or more runtime fields in the search request. These fields take + * precedence over mapped fields with the same name. */ runtime_mappings?: MappingRuntimeFields - /** Stats groups to associate with the search. Each group maintains a statistics aggregation for its associated searches. You can retrieve these stats using the indices stats API. */ + /** Stats groups to associate with the search. Each group maintains a statistics + * aggregation for its associated searches. You can retrieve these stats using + * the indices stats API. */ stats?: string[] /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { index?: never, wait_for_completion_timeout?: never, keep_alive?: never, keep_on_completion?: never, allow_no_indices?: never, allow_partial_search_results?: never, analyzer?: never, analyze_wildcard?: never, batched_reduce_size?: never, ccs_minimize_roundtrips?: never, default_operator?: never, df?: never, expand_wildcards?: never, ignore_throttled?: never, ignore_unavailable?: never, lenient?: never, max_concurrent_shard_requests?: never, preference?: never, request_cache?: never, routing?: never, search_type?: never, suggest_field?: never, suggest_mode?: never, suggest_size?: never, suggest_text?: never, typed_keys?: never, rest_total_hits_as_int?: never, _source_excludes?: never, _source_includes?: never, q?: never, aggregations?: never, aggs?: never, collapse?: never, explain?: never, ext?: never, from?: never, highlight?: never, track_total_hits?: never, indices_boost?: never, docvalue_fields?: never, knn?: never, min_score?: never, post_filter?: never, profile?: never, query?: never, rescore?: never, script_fields?: never, search_after?: never, size?: never, slice?: never, sort?: never, _source?: never, fields?: never, suggest?: never, terminate_after?: never, timeout?: never, track_scores?: never, version?: never, seq_no_primary_term?: never, stored_fields?: never, pit?: never, runtime_mappings?: never, stats?: never } @@ -7517,13 +9780,15 @@ export type AsyncSearchSubmitResponse } export interface AutoscalingDeleteAutoscalingPolicyRequest extends RequestBase { -/** the name of the autoscaling policy */ + /** the name of the autoscaling policy */ name: Name - /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ + /** Period to wait for a connection to the master node. + * If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration /** Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration @@ -7563,7 +9828,8 @@ export interface AutoscalingGetAutoscalingCapacityAutoscalingResources { } export interface AutoscalingGetAutoscalingCapacityRequest extends RequestBase { -/** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ + /** Period to wait for a connection to the master node. + * If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration /** All values in `body` will be added to the request body. 
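To tie the async search request/response types above together, a minimal submit-then-poll sketch; the index, query, sizes, and durations are illustrative only.

```typescript
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'https://localhost:9200/' }) // placeholder

// Submit an async search; `keep_on_completion` stores results even if the
// search finishes within `wait_for_completion_timeout`.
const submitted = await client.asyncSearch.submit({
  index: 'logs-*', // placeholder index pattern
  wait_for_completion_timeout: '2s',
  keep_on_completion: true,
  keep_alive: '10m',
  query: { match: { message: 'timeout' } },
  size: 100
})

// If it is still running, check its status and fetch the results once done.
if (submitted.is_running && submitted.id != null) {
  const status = await client.asyncSearch.status({ id: submitted.id })
  if (!status.is_running) {
    const done = await client.asyncSearch.get({ id: submitted.id })
    console.log(done.response.hits.hits)
  }
}
```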
*/ body?: string | { [key: string]: any } & { master_timeout?: never } @@ -7576,9 +9842,10 @@ export interface AutoscalingGetAutoscalingCapacityResponse { } export interface AutoscalingGetAutoscalingPolicyRequest extends RequestBase { -/** the name of the autoscaling policy */ + /** the name of the autoscaling policy */ name: Name - /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ + /** Period to wait for a connection to the master node. + * If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { name?: never, master_timeout?: never } @@ -7589,9 +9856,10 @@ export interface AutoscalingGetAutoscalingPolicyRequest extends RequestBase { export type AutoscalingGetAutoscalingPolicyResponse = AutoscalingAutoscalingPolicy export interface AutoscalingPutAutoscalingPolicyRequest extends RequestBase { -/** the name of the autoscaling policy */ + /** the name of the autoscaling policy */ name: Name - /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ + /** Period to wait for a connection to the master node. + * If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration /** Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration @@ -7628,35 +9896,69 @@ export type CatCatTransformColumn = 'changes_last_detection_time' | 'cldt' | 'ch export type CatCatTransformColumns = CatCatTransformColumn | CatCatTransformColumn[] export interface CatAliasesAliasesRecord { + /** alias name */ alias?: string + /** alias name + * @alias alias */ a?: string + /** index alias points to */ index?: IndexName + /** index alias points to + * @alias index */ i?: IndexName + /** index alias points to + * @alias index */ idx?: IndexName + /** filter */ filter?: string + /** filter + * @alias filter */ f?: string + /** filter + * @alias filter */ fi?: string + /** index routing */ 'routing.index'?: string + /** index routing + * @alias 'routing.index' */ ri?: string + /** index routing + * @alias 'routing.index' */ routingIndex?: string + /** search routing */ 'routing.search'?: string + /** search routing + * @alias 'routing.search' */ rs?: string + /** search routing + * @alias 'routing.search' */ routingSearch?: string + /** write index */ is_write_index?: string + /** write index + * @alias is_write_index */ w?: string + /** write index + * @alias is_write_index */ isWriteIndex?: string } export interface CatAliasesRequest extends CatCatRequestBase { -/** A comma-separated list of aliases to retrieve. Supports wildcards (`*`). To retrieve all aliases, omit this parameter or use `*` or `_all`. */ + /** A comma-separated list of aliases to retrieve. Supports wildcards (`*`). To retrieve all aliases, omit this parameter or use `*` or `_all`. */ name?: Names /** List of columns to appear in the response. Supports simple wildcards. */ h?: Names - /** List of columns that determine how the table should be sorted. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name. */ + /** List of columns that determine how the table should be sorted. 
+ * Sorting defaults to ascending and can be changed by setting `:asc` + * or `:desc` as a suffix to the column name. */ s?: Names - /** The type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. It supports comma-separated values, such as `open,hidden`. */ + /** The type of index that wildcard patterns can match. + * If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. + * It supports comma-separated values, such as `open,hidden`. */ expand_wildcards?: ExpandWildcards - /** The period to wait for a connection to the master node. If the master node is not available before the timeout expires, the request fails and returns an error. To indicated that the request should never timeout, you can set it to `-1`. */ + /** The period to wait for a connection to the master node. + * If the master node is not available before the timeout expires, the request fails and returns an error. + * To indicated that the request should never timeout, you can set it to `-1`. */ master_timeout?: Duration /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { name?: never, h?: never, s?: never, expand_wildcards?: never, master_timeout?: never } @@ -7667,51 +9969,127 @@ export interface CatAliasesRequest extends CatCatRequestBase { export type CatAliasesResponse = CatAliasesAliasesRecord[] export interface CatAllocationAllocationRecord { + /** Number of primary and replica shards assigned to the node. */ shards?: string + /** Number of primary and replica shards assigned to the node. + * @alias shards */ s?: string + /** Amount of shards that are scheduled to be moved elsewhere in the cluster or -1 other than desired balance allocator is used */ 'shards.undesired'?: string | null + /** Sum of index write load forecasts */ 'write_load.forecast'?: SpecUtilsStringified | null + /** Sum of index write load forecasts + * @alias 'write_load.forecast' */ wlf?: SpecUtilsStringified | null + /** Sum of index write load forecasts + * @alias 'write_load.forecast' */ writeLoadForecast?: SpecUtilsStringified | null + /** Sum of shard size forecasts */ 'disk.indices.forecast'?: ByteSize | null + /** Sum of shard size forecasts + * @alias 'disk.indices.forecast' */ dif?: ByteSize | null + /** Sum of shard size forecasts + * @alias 'disk.indices.forecast' */ diskIndicesForecast?: ByteSize | null + /** Disk space used by the node’s shards. Does not include disk space for the translog or unassigned shards. + * IMPORTANT: This metric double-counts disk space for hard-linked files, such as those created when shrinking, splitting, or cloning an index. */ 'disk.indices'?: ByteSize | null + /** Disk space used by the node’s shards. Does not include disk space for the translog or unassigned shards. + * IMPORTANT: This metric double-counts disk space for hard-linked files, such as those created when shrinking, splitting, or cloning an index. + * @alias 'disk.indices' */ di?: ByteSize | null + /** Disk space used by the node’s shards. Does not include disk space for the translog or unassigned shards. + * IMPORTANT: This metric double-counts disk space for hard-linked files, such as those created when shrinking, splitting, or cloning an index. + * @alias 'disk.indices' */ diskIndices?: ByteSize | null + /** Total disk space in use. + * Elasticsearch retrieves this metric from the node’s operating system (OS). 
+ * The metric includes disk space for: Elasticsearch, including the translog and unassigned shards; the node’s operating system; any other applications or files on the node. + * Unlike `disk.indices`, this metric does not double-count disk space for hard-linked files. */ 'disk.used'?: ByteSize | null + /** Total disk space in use. + * Elasticsearch retrieves this metric from the node’s operating system (OS). + * The metric includes disk space for: Elasticsearch, including the translog and unassigned shards; the node’s operating system; any other applications or files on the node. + * Unlike `disk.indices`, this metric does not double-count disk space for hard-linked files. + * @alias 'disk.used' */ du?: ByteSize | null + /** Total disk space in use. + * Elasticsearch retrieves this metric from the node’s operating system (OS). + * The metric includes disk space for: Elasticsearch, including the translog and unassigned shards; the node’s operating system; any other applications or files on the node. + * Unlike `disk.indices`, this metric does not double-count disk space for hard-linked files. + * @alias 'disk.used' */ diskUsed?: ByteSize | null + /** Free disk space available to Elasticsearch. + * Elasticsearch retrieves this metric from the node’s operating system. + * Disk-based shard allocation uses this metric to assign shards to nodes based on available disk space. */ 'disk.avail'?: ByteSize | null + /** Free disk space available to Elasticsearch. + * Elasticsearch retrieves this metric from the node’s operating system. + * Disk-based shard allocation uses this metric to assign shards to nodes based on available disk space. + * @alias 'disk.avail' */ da?: ByteSize | null + /** Free disk space available to Elasticsearch. + * Elasticsearch retrieves this metric from the node’s operating system. + * Disk-based shard allocation uses this metric to assign shards to nodes based on available disk space. + * @alias 'disk.avail' */ diskAvail?: ByteSize | null + /** Total disk space for the node, including in-use and available space. */ 'disk.total'?: ByteSize | null + /** Total disk space for the node, including in-use and available space. + * @alias 'disk.total' */ dt?: ByteSize | null + /** Total disk space for the node, including in-use and available space. + * @alias 'disk.total' */ diskTotal?: ByteSize | null + /** Total percentage of disk space in use. Calculated as `disk.used / disk.total`. */ 'disk.percent'?: Percentage | null + /** Total percentage of disk space in use. Calculated as `disk.used / disk.total`. + * @alias 'disk.percent' */ dp?: Percentage | null + /** Total percentage of disk space in use. Calculated as `disk.used / disk.total`. + * @alias 'disk.percent' */ diskPercent?: Percentage | null + /** Network host for the node. Set using the `network.host` setting. */ host?: Host | null + /** Network host for the node. Set using the `network.host` setting. + * @alias host */ h?: Host | null + /** IP address and port for the node. */ ip?: Ip | null + /** Name for the node. Set using the `node.name` setting. */ node?: string + /** Name for the node. Set using the `node.name` setting. 
+ * @alias node */ n?: string + /** Node roles */ 'node.role'?: string | null + /** Node roles + * @alias 'node.role' */ r?: string | null + /** Node roles + * @alias 'node.role' */ role?: string | null + /** Node roles + * @alias 'node.role' */ nodeRole?: string | null } export interface CatAllocationRequest extends CatCatRequestBase { -/** A comma-separated list of node identifiers or names used to limit the returned information. */ + /** A comma-separated list of node identifiers or names used to limit the returned information. */ node_id?: NodeIds /** The unit used to display byte values. */ bytes?: Bytes /** List of columns to appear in the response. Supports simple wildcards. */ h?: Names - /** List of columns that determine how the table should be sorted. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name. */ + /** List of columns that determine how the table should be sorted. + * Sorting defaults to ascending and can be changed by setting `:asc` + * or `:desc` as a suffix to the column name. */ s?: Names - /** If `true`, the request computes the list of selected nodes from the local cluster state. If `false` the list of selected nodes are computed from the cluster state of the master node. In both cases the coordinating node will send requests for further information to each selected node. */ + /** If `true`, the request computes the list of selected nodes from the + * local cluster state. If `false` the list of selected nodes are computed + * from the cluster state of the master node. In both cases the coordinating + * node will send requests for further information to each selected node. */ local?: boolean /** Period to wait for a connection to the master node. */ master_timeout?: Duration @@ -7734,13 +10112,20 @@ export interface CatComponentTemplatesComponentTemplate { } export interface CatComponentTemplatesRequest extends CatCatRequestBase { -/** The name of the component template. It accepts wildcard expressions. If it is omitted, all component templates are returned. */ + /** The name of the component template. + * It accepts wildcard expressions. + * If it is omitted, all component templates are returned. */ name?: string /** List of columns to appear in the response. Supports simple wildcards. */ h?: Names - /** List of columns that determine how the table should be sorted. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name. */ + /** List of columns that determine how the table should be sorted. + * Sorting defaults to ascending and can be changed by setting `:asc` + * or `:desc` as a suffix to the column name. */ s?: Names - /** If `true`, the request computes the list of selected nodes from the local cluster state. If `false` the list of selected nodes are computed from the cluster state of the master node. In both cases the coordinating node will send requests for further information to each selected node. */ + /** If `true`, the request computes the list of selected nodes from the + * local cluster state. If `false` the list of selected nodes are computed + * from the cluster state of the master node. In both cases the coordinating + * node will send requests for further information to each selected node. */ local?: boolean /** The period to wait for a connection to the master node. 
*/ master_timeout?: Duration @@ -7753,25 +10138,48 @@ export interface CatComponentTemplatesRequest extends CatCatRequestBase { export type CatComponentTemplatesResponse = CatComponentTemplatesComponentTemplate[] export interface CatCountCountRecord { + /** seconds since 1970-01-01 00:00:00 */ epoch?: SpecUtilsStringified> + /** seconds since 1970-01-01 00:00:00 + * @alias epoch */ t?: SpecUtilsStringified> + /** seconds since 1970-01-01 00:00:00 + * @alias epoch */ time?: SpecUtilsStringified> + /** time in HH:MM:SS */ timestamp?: TimeOfDay + /** time in HH:MM:SS + * @alias timestamp */ ts?: TimeOfDay + /** time in HH:MM:SS + * @alias timestamp */ hms?: TimeOfDay + /** time in HH:MM:SS + * @alias timestamp */ hhmmss?: TimeOfDay + /** the document count */ count?: string + /** the document count + * @alias count */ dc?: string + /** the document count + * @alias count */ 'docs.count'?: string + /** the document count + * @alias count */ docsCount?: string } export interface CatCountRequest extends CatCatRequestBase { -/** A comma-separated list of data streams, indices, and aliases used to limit the request. It supports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`. */ + /** A comma-separated list of data streams, indices, and aliases used to limit the request. + * It supports wildcards (`*`). + * To target all data streams and indices, omit this parameter or use `*` or `_all`. */ index?: Indices /** List of columns to appear in the response. Supports simple wildcards. */ h?: Names - /** List of columns that determine how the table should be sorted. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name. */ + /** List of columns that determine how the table should be sorted. + * Sorting defaults to ascending and can be changed by setting `:asc` + * or `:desc` as a suffix to the column name. */ s?: Names /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { index?: never, h?: never, s?: never } @@ -7782,25 +10190,40 @@ export interface CatCountRequest extends CatCatRequestBase { export type CatCountResponse = CatCountCountRecord[] export interface CatFielddataFielddataRecord { + /** node id */ id?: string + /** host name */ host?: string + /** host name + * @alias host */ h?: string + /** ip address */ ip?: string + /** node name */ node?: string + /** node name + * @alias node */ n?: string + /** field name */ field?: string + /** field name + * @alias field */ f?: string + /** field data usage */ size?: string } export interface CatFielddataRequest extends CatCatRequestBase { -/** Comma-separated list of fields used to limit returned information. To retrieve all fields, omit this parameter. */ + /** Comma-separated list of fields used to limit returned information. + * To retrieve all fields, omit this parameter. */ fields?: Fields /** The unit used to display byte values. */ bytes?: Bytes /** List of columns to appear in the response. Supports simple wildcards. */ h?: Names - /** List of columns that determine how the table should be sorted. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name. */ + /** List of columns that determine how the table should be sorted. + * Sorting defaults to ascending and can be changed by setting `:asc` + * or `:desc` as a suffix to the column name. */ s?: Names /** All values in `body` will be added to the request body. 
*/ body?: string | { [key: string]: any } & { fields?: never, bytes?: never, h?: never, s?: never } @@ -7811,66 +10234,153 @@ export interface CatFielddataRequest extends CatCatRequestBase { export type CatFielddataResponse = CatFielddataFielddataRecord[] export interface CatHealthHealthRecord { + /** seconds since 1970-01-01 00:00:00 */ epoch?: SpecUtilsStringified> + /** seconds since 1970-01-01 00:00:00 + * @alias epoch */ time?: SpecUtilsStringified> + /** time in HH:MM:SS */ timestamp?: TimeOfDay + /** time in HH:MM:SS + * @alias timestamp */ ts?: TimeOfDay + /** time in HH:MM:SS + * @alias timestamp */ hms?: TimeOfDay + /** time in HH:MM:SS + * @alias timestamp */ hhmmss?: TimeOfDay + /** cluster name */ cluster?: string + /** cluster name + * @alias cluster */ cl?: string + /** health status */ status?: string + /** health status + * @alias status */ st?: string + /** total number of nodes */ 'node.total'?: string + /** total number of nodes + * @alias 'node.total' */ nt?: string + /** total number of nodes + * @alias 'node.total' */ nodeTotal?: string + /** number of nodes that can store data */ 'node.data'?: string + /** number of nodes that can store data + * @alias 'node.data' */ nd?: string + /** number of nodes that can store data + * @alias 'node.data' */ nodeData?: string + /** total number of shards */ shards?: string + /** total number of shards + * @alias shards */ t?: string + /** total number of shards + * @alias shards */ sh?: string + /** total number of shards + * @alias shards */ 'shards.total'?: string + /** total number of shards + * @alias shards */ shardsTotal?: string + /** number of primary shards */ pri?: string + /** number of primary shards + * @alias pri */ p?: string + /** number of primary shards + * @alias pri */ 'shards.primary'?: string + /** number of primary shards + * @alias pri */ shardsPrimary?: string + /** number of relocating nodes */ relo?: string + /** number of relocating nodes + * @alias relo */ r?: string + /** number of relocating nodes + * @alias relo */ 'shards.relocating'?: string + /** number of relocating nodes + * @alias relo */ shardsRelocating?: string + /** number of initializing nodes */ init?: string + /** number of initializing nodes + * @alias init */ i?: string + /** number of initializing nodes + * @alias init */ 'shards.initializing'?: string + /** number of initializing nodes + * @alias init */ shardsInitializing?: string + /** number of unassigned primary shards */ 'unassign.pri'?: string + /** number of unassigned primary shards + * @alias 'unassign.pri' */ up?: string + /** number of unassigned primary shards + * @alias 'unassign.pri' */ 'shards.unassigned.primary'?: string + /** number of unassigned primary shards + * @alias 'unassign.pri' */ shardsUnassignedPrimary?: string + /** number of unassigned shards */ unassign?: string + /** number of unassigned shards + * @alias unassign */ u?: string + /** number of unassigned shards + * @alias unassign */ 'shards.unassigned'?: string + /** number of unassigned shards + * @alias unassign */ shardsUnassigned?: string + /** number of pending tasks */ pending_tasks?: string + /** number of pending tasks + * @alias pending_tasks */ pt?: string + /** number of pending tasks + * @alias pending_tasks */ pendingTasks?: string + /** wait time of longest task pending */ max_task_wait_time?: string + /** wait time of longest task pending + * @alias max_task_wait_time */ mtwt?: string + /** wait time of longest task pending + * @alias max_task_wait_time */ maxTaskWaitTime?: string + 
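The health columns documented above are what the client returns for the cat health endpoint when JSON output is requested. A minimal sketch of consuming them with the JavaScript client — the endpoint URL is a placeholder, and it assumes the common cat `format` query parameter is accepted here as on the other cat endpoints:

import { Client } from '@elastic/elasticsearch'

async function printClusterHealth (): Promise<void> {
  const client = new Client({ node: 'http://localhost:9200/' }) // placeholder endpoint
  // Request only a few of the columns defined in CatHealthHealthRecord;
  // `format: 'json'` yields the typed record array instead of a text table.
  const rows = await client.cat.health({
    format: 'json',
    h: ['cluster', 'status', 'node.total', 'shards', 'pending_tasks']
  })
  for (const row of rows) {
    console.log(row.cluster, row.status, row['node.total'], row.pending_tasks)
  }
}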
/** active number of shards in percent */ active_shards_percent?: string + /** active number of shards in percent + * @alias active_shards_percent */ asp?: string + /** active number of shards in percent + * @alias active_shards_percent */ activeShardsPercent?: string } export interface CatHealthRequest extends CatCatRequestBase { -/** The unit used to display time values. */ + /** The unit used to display time values. */ time?: TimeUnit /** If true, returns `HH:MM:SS` and Unix epoch timestamps. */ ts?: boolean /** List of columns to appear in the response. Supports simple wildcards. */ h?: Names - /** List of columns that determine how the table should be sorted. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name. */ + /** List of columns that determine how the table should be sorted. + * Sorting defaults to ascending and can be changed by setting `:asc` + * or `:desc` as a suffix to the column name. */ s?: Names /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { time?: never, ts?: never, h?: never, s?: never } @@ -7891,298 +10401,733 @@ export interface CatHelpResponse { } export interface CatIndicesIndicesRecord { + /** current health status */ health?: string + /** current health status + * @alias health */ h?: string + /** open/close status */ status?: string + /** open/close status + * @alias status */ s?: string + /** index name */ index?: string + /** index name + * @alias index */ i?: string + /** index name + * @alias index */ idx?: string + /** index uuid */ uuid?: string + /** index uuid + * @alias uuid */ id?: string + /** number of primary shards */ pri?: string + /** number of primary shards + * @alias pri */ p?: string + /** number of primary shards + * @alias pri */ 'shards.primary'?: string + /** number of primary shards + * @alias pri */ shardsPrimary?: string + /** number of replica shards */ rep?: string + /** number of replica shards + * @alias rep */ r?: string + /** number of replica shards + * @alias rep */ 'shards.replica'?: string + /** number of replica shards + * @alias rep */ shardsReplica?: string + /** available docs */ 'docs.count'?: string | null + /** available docs + * @alias 'docs.count' */ dc?: string | null + /** available docs + * @alias 'docs.count' */ docsCount?: string | null + /** deleted docs */ 'docs.deleted'?: string | null + /** deleted docs + * @alias 'docs.deleted' */ dd?: string | null + /** deleted docs + * @alias 'docs.deleted' */ docsDeleted?: string | null + /** index creation date (millisecond value) */ 'creation.date'?: string + /** index creation date (millisecond value) + * @alias 'creation.date' */ cd?: string + /** index creation date (as string) */ 'creation.date.string'?: string + /** index creation date (as string) + * @alias 'creation.date.string' */ cds?: string + /** store size of primaries & replicas */ 'store.size'?: string | null + /** store size of primaries & replicas + * @alias 'store.size' */ ss?: string | null + /** store size of primaries & replicas + * @alias 'store.size' */ storeSize?: string | null + /** store size of primaries */ 'pri.store.size'?: string | null + /** total size of dataset (including the cache for partially mounted indices) */ 'dataset.size'?: string | null + /** size of completion */ 'completion.size'?: string + /** size of completion + * @alias 'completion.size' */ cs?: string + /** size of completion + * @alias 'completion.size' */ completionSize?: string + /** size of completion 
*/ 'pri.completion.size'?: string + /** used fielddata cache */ 'fielddata.memory_size'?: string + /** used fielddata cache + * @alias 'fielddata.memory_size' */ fm?: string + /** used fielddata cache + * @alias 'fielddata.memory_size' */ fielddataMemory?: string + /** used fielddata cache */ 'pri.fielddata.memory_size'?: string + /** fielddata evictions */ 'fielddata.evictions'?: string + /** fielddata evictions + * @alias 'fielddata.evictions' */ fe?: string + /** fielddata evictions + * @alias 'fielddata.evictions' */ fielddataEvictions?: string + /** fielddata evictions */ 'pri.fielddata.evictions'?: string + /** used query cache */ 'query_cache.memory_size'?: string + /** used query cache + * @alias 'query_cache.memory_size' */ qcm?: string + /** used query cache + * @alias 'query_cache.memory_size' */ queryCacheMemory?: string + /** used query cache */ 'pri.query_cache.memory_size'?: string + /** query cache evictions */ 'query_cache.evictions'?: string + /** query cache evictions + * @alias 'query_cache.evictions' */ qce?: string + /** query cache evictions + * @alias 'query_cache.evictions' */ queryCacheEvictions?: string + /** query cache evictions */ 'pri.query_cache.evictions'?: string + /** used request cache */ 'request_cache.memory_size'?: string + /** used request cache + * @alias 'request_cache.memory_size' */ rcm?: string + /** used request cache + * @alias 'request_cache.memory_size' */ requestCacheMemory?: string + /** used request cache */ 'pri.request_cache.memory_size'?: string + /** request cache evictions */ 'request_cache.evictions'?: string + /** request cache evictions + * @alias 'request_cache.evictions' */ rce?: string + /** request cache evictions + * @alias 'request_cache.evictions' */ requestCacheEvictions?: string + /** request cache evictions */ 'pri.request_cache.evictions'?: string + /** request cache hit count */ 'request_cache.hit_count'?: string + /** request cache hit count + * @alias 'request_cache.hit_count' */ rchc?: string + /** request cache hit count + * @alias 'request_cache.hit_count' */ requestCacheHitCount?: string + /** request cache hit count */ 'pri.request_cache.hit_count'?: string + /** request cache miss count */ 'request_cache.miss_count'?: string + /** request cache miss count + * @alias 'request_cache.miss_count' */ rcmc?: string + /** request cache miss count + * @alias 'request_cache.miss_count' */ requestCacheMissCount?: string + /** request cache miss count */ 'pri.request_cache.miss_count'?: string + /** number of flushes */ 'flush.total'?: string + /** number of flushes + * @alias 'flush.total' */ ft?: string + /** number of flushes + * @alias 'flush.total' */ flushTotal?: string + /** number of flushes */ 'pri.flush.total'?: string + /** time spent in flush */ 'flush.total_time'?: string + /** time spent in flush + * @alias 'flush.total_time' */ ftt?: string + /** time spent in flush + * @alias 'flush.total_time' */ flushTotalTime?: string + /** time spent in flush */ 'pri.flush.total_time'?: string + /** number of current get ops */ 'get.current'?: string + /** number of current get ops + * @alias 'get.current' */ gc?: string + /** number of current get ops + * @alias 'get.current' */ getCurrent?: string + /** number of current get ops */ 'pri.get.current'?: string + /** time spent in get */ 'get.time'?: string + /** time spent in get + * @alias 'get.time' */ gti?: string + /** time spent in get + * @alias 'get.time' */ getTime?: string + /** time spent in get */ 'pri.get.time'?: string + /** number of get ops */ 
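The per-index columns declared here are most useful when narrowed and sorted server-side via the `h` and `s` parameters of CatIndicesRequest. A sketch along the same lines as the health example above, with an illustrative index pattern and byte unit:

import { Client } from '@elastic/elasticsearch'

async function largestLogIndices (client: Client): Promise<void> {
  // Only a handful of CatIndicesIndicesRecord columns, sorted by store size;
  // `bytes: 'mb'` makes the size columns directly comparable.
  const rows = await client.cat.indices({
    index: 'logs-*',
    bytes: 'mb',
    h: ['index', 'health', 'docs.count', 'store.size'],
    s: 'store.size:desc',
    format: 'json'
  })
  for (const row of rows) {
    console.log(row.index, row.health, row['docs.count'], row['store.size'])
  }
}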
'get.total'?: string + /** number of get ops + * @alias 'get.total' */ gto?: string + /** number of get ops + * @alias 'get.total' */ getTotal?: string + /** number of get ops */ 'pri.get.total'?: string + /** time spent in successful gets */ 'get.exists_time'?: string + /** time spent in successful gets + * @alias 'get.exists_time' */ geti?: string + /** time spent in successful gets + * @alias 'get.exists_time' */ getExistsTime?: string + /** time spent in successful gets */ 'pri.get.exists_time'?: string + /** number of successful gets */ 'get.exists_total'?: string + /** number of successful gets + * @alias 'get.exists_total' */ geto?: string + /** number of successful gets + * @alias 'get.exists_total' */ getExistsTotal?: string + /** number of successful gets */ 'pri.get.exists_total'?: string + /** time spent in failed gets */ 'get.missing_time'?: string + /** time spent in failed gets + * @alias 'get.missing_time' */ gmti?: string + /** time spent in failed gets + * @alias 'get.missing_time' */ getMissingTime?: string + /** time spent in failed gets */ 'pri.get.missing_time'?: string + /** number of failed gets */ 'get.missing_total'?: string + /** number of failed gets + * @alias 'get.missing_total' */ gmto?: string + /** number of failed gets + * @alias 'get.missing_total' */ getMissingTotal?: string + /** number of failed gets */ 'pri.get.missing_total'?: string + /** number of current deletions */ 'indexing.delete_current'?: string + /** number of current deletions + * @alias 'indexing.delete_current' */ idc?: string + /** number of current deletions + * @alias 'indexing.delete_current' */ indexingDeleteCurrent?: string + /** number of current deletions */ 'pri.indexing.delete_current'?: string + /** time spent in deletions */ 'indexing.delete_time'?: string + /** time spent in deletions + * @alias 'indexing.delete_time' */ idti?: string + /** time spent in deletions + * @alias 'indexing.delete_time' */ indexingDeleteTime?: string + /** time spent in deletions */ 'pri.indexing.delete_time'?: string + /** number of delete ops */ 'indexing.delete_total'?: string + /** number of delete ops + * @alias 'indexing.delete_total' */ idto?: string + /** number of delete ops + * @alias 'indexing.delete_total' */ indexingDeleteTotal?: string + /** number of delete ops */ 'pri.indexing.delete_total'?: string + /** number of current indexing ops */ 'indexing.index_current'?: string + /** number of current indexing ops + * @alias 'indexing.index_current' */ iic?: string + /** number of current indexing ops + * @alias 'indexing.index_current' */ indexingIndexCurrent?: string + /** number of current indexing ops */ 'pri.indexing.index_current'?: string + /** time spent in indexing */ 'indexing.index_time'?: string + /** time spent in indexing + * @alias 'indexing.index_time' */ iiti?: string + /** time spent in indexing + * @alias 'indexing.index_time' */ indexingIndexTime?: string + /** time spent in indexing */ 'pri.indexing.index_time'?: string + /** number of indexing ops */ 'indexing.index_total'?: string + /** number of indexing ops + * @alias 'indexing.index_total' */ iito?: string + /** number of indexing ops + * @alias 'indexing.index_total' */ indexingIndexTotal?: string + /** number of indexing ops */ 'pri.indexing.index_total'?: string + /** number of failed indexing ops */ 'indexing.index_failed'?: string + /** number of failed indexing ops + * @alias 'indexing.index_failed' */ iif?: string + /** number of failed indexing ops + * @alias 'indexing.index_failed' */ 
indexingIndexFailed?: string + /** number of failed indexing ops */ 'pri.indexing.index_failed'?: string + /** number of current merges */ 'merges.current'?: string + /** number of current merges + * @alias 'merges.current' */ mc?: string + /** number of current merges + * @alias 'merges.current' */ mergesCurrent?: string + /** number of current merges */ 'pri.merges.current'?: string + /** number of current merging docs */ 'merges.current_docs'?: string + /** number of current merging docs + * @alias 'merges.current_docs' */ mcd?: string + /** number of current merging docs + * @alias 'merges.current_docs' */ mergesCurrentDocs?: string + /** number of current merging docs */ 'pri.merges.current_docs'?: string + /** size of current merges */ 'merges.current_size'?: string + /** size of current merges + * @alias 'merges.current_size' */ mcs?: string + /** size of current merges + * @alias 'merges.current_size' */ mergesCurrentSize?: string + /** size of current merges */ 'pri.merges.current_size'?: string + /** number of completed merge ops */ 'merges.total'?: string + /** number of completed merge ops + * @alias 'merges.total' */ mt?: string + /** number of completed merge ops + * @alias 'merges.total' */ mergesTotal?: string + /** number of completed merge ops */ 'pri.merges.total'?: string + /** docs merged */ 'merges.total_docs'?: string + /** docs merged + * @alias 'merges.total_docs' */ mtd?: string + /** docs merged + * @alias 'merges.total_docs' */ mergesTotalDocs?: string + /** docs merged */ 'pri.merges.total_docs'?: string + /** size merged */ 'merges.total_size'?: string + /** size merged + * @alias 'merges.total_size' */ mts?: string + /** size merged + * @alias 'merges.total_size' */ mergesTotalSize?: string + /** size merged */ 'pri.merges.total_size'?: string + /** time spent in merges */ 'merges.total_time'?: string + /** time spent in merges + * @alias 'merges.total_time' */ mtt?: string + /** time spent in merges + * @alias 'merges.total_time' */ mergesTotalTime?: string + /** time spent in merges */ 'pri.merges.total_time'?: string + /** total refreshes */ 'refresh.total'?: string + /** total refreshes + * @alias 'refresh.total' */ rto?: string + /** total refreshes + * @alias 'refresh.total' */ refreshTotal?: string + /** total refreshes */ 'pri.refresh.total'?: string + /** time spent in refreshes */ 'refresh.time'?: string + /** time spent in refreshes + * @alias 'refresh.time' */ rti?: string + /** time spent in refreshes + * @alias 'refresh.time' */ refreshTime?: string + /** time spent in refreshes */ 'pri.refresh.time'?: string + /** total external refreshes */ 'refresh.external_total'?: string + /** total external refreshes + * @alias 'refresh.external_total' */ reto?: string + /** total external refreshes */ 'pri.refresh.external_total'?: string + /** time spent in external refreshes */ 'refresh.external_time'?: string + /** time spent in external refreshes + * @alias 'refresh.external_time' */ reti?: string + /** time spent in external refreshes */ 'pri.refresh.external_time'?: string + /** number of pending refresh listeners */ 'refresh.listeners'?: string + /** number of pending refresh listeners + * @alias 'refresh.listeners' */ rli?: string + /** number of pending refresh listeners + * @alias 'refresh.listeners' */ refreshListeners?: string + /** number of pending refresh listeners */ 'pri.refresh.listeners'?: string + /** current fetch phase ops */ 'search.fetch_current'?: string + /** current fetch phase ops + * @alias 'search.fetch_current' */ sfc?: 
string + /** current fetch phase ops + * @alias 'search.fetch_current' */ searchFetchCurrent?: string + /** current fetch phase ops */ 'pri.search.fetch_current'?: string + /** time spent in fetch phase */ 'search.fetch_time'?: string + /** time spent in fetch phase + * @alias 'search.fetch_time' */ sfti?: string + /** time spent in fetch phase + * @alias 'search.fetch_time' */ searchFetchTime?: string + /** time spent in fetch phase */ 'pri.search.fetch_time'?: string + /** total fetch ops */ 'search.fetch_total'?: string + /** total fetch ops + * @alias 'search.fetch_total' */ sfto?: string + /** total fetch ops + * @alias 'search.fetch_total' */ searchFetchTotal?: string + /** total fetch ops */ 'pri.search.fetch_total'?: string + /** open search contexts */ 'search.open_contexts'?: string + /** open search contexts + * @alias 'search.open_contexts' */ so?: string + /** open search contexts + * @alias 'search.open_contexts' */ searchOpenContexts?: string + /** open search contexts */ 'pri.search.open_contexts'?: string + /** current query phase ops */ 'search.query_current'?: string + /** current query phase ops + * @alias 'search.query_current' */ sqc?: string + /** current query phase ops + * @alias 'search.query_current' */ searchQueryCurrent?: string + /** current query phase ops */ 'pri.search.query_current'?: string + /** time spent in query phase */ 'search.query_time'?: string + /** time spent in query phase + * @alias 'search.query_time' */ sqti?: string + /** time spent in query phase + * @alias 'search.query_time' */ searchQueryTime?: string + /** time spent in query phase */ 'pri.search.query_time'?: string + /** total query phase ops */ 'search.query_total'?: string + /** total query phase ops + * @alias 'search.query_total' */ sqto?: string + /** total query phase ops + * @alias 'search.query_total' */ searchQueryTotal?: string + /** total query phase ops */ 'pri.search.query_total'?: string + /** open scroll contexts */ 'search.scroll_current'?: string + /** open scroll contexts + * @alias 'search.scroll_current' */ scc?: string + /** open scroll contexts + * @alias 'search.scroll_current' */ searchScrollCurrent?: string + /** open scroll contexts */ 'pri.search.scroll_current'?: string + /** time scroll contexts held open */ 'search.scroll_time'?: string + /** time scroll contexts held open + * @alias 'search.scroll_time' */ scti?: string + /** time scroll contexts held open + * @alias 'search.scroll_time' */ searchScrollTime?: string + /** time scroll contexts held open */ 'pri.search.scroll_time'?: string + /** completed scroll contexts */ 'search.scroll_total'?: string + /** completed scroll contexts + * @alias 'search.scroll_total' */ scto?: string + /** completed scroll contexts + * @alias 'search.scroll_total' */ searchScrollTotal?: string + /** completed scroll contexts */ 'pri.search.scroll_total'?: string + /** number of segments */ 'segments.count'?: string + /** number of segments + * @alias 'segments.count' */ sc?: string + /** number of segments + * @alias 'segments.count' */ segmentsCount?: string + /** number of segments */ 'pri.segments.count'?: string + /** memory used by segments */ 'segments.memory'?: string + /** memory used by segments + * @alias 'segments.memory' */ sm?: string + /** memory used by segments + * @alias 'segments.memory' */ segmentsMemory?: string + /** memory used by segments */ 'pri.segments.memory'?: string + /** memory used by index writer */ 'segments.index_writer_memory'?: string + /** memory used by index writer + * @alias 
'segments.index_writer_memory' */ siwm?: string + /** memory used by index writer + * @alias 'segments.index_writer_memory' */ segmentsIndexWriterMemory?: string + /** memory used by index writer */ 'pri.segments.index_writer_memory'?: string + /** memory used by version map */ 'segments.version_map_memory'?: string + /** memory used by version map + * @alias 'segments.version_map_memory' */ svmm?: string + /** memory used by version map + * @alias 'segments.version_map_memory' */ segmentsVersionMapMemory?: string + /** memory used by version map */ 'pri.segments.version_map_memory'?: string + /** memory used by fixed bit sets for nested object field types and export type filters for types referred in _parent fields */ 'segments.fixed_bitset_memory'?: string + /** memory used by fixed bit sets for nested object field types and export type filters for types referred in _parent fields + * @alias 'segments.fixed_bitset_memory' */ sfbm?: string + /** memory used by fixed bit sets for nested object field types and export type filters for types referred in _parent fields + * @alias 'segments.fixed_bitset_memory' */ fixedBitsetMemory?: string + /** memory used by fixed bit sets for nested object field types and export type filters for types referred in _parent fields */ 'pri.segments.fixed_bitset_memory'?: string + /** current warmer ops */ 'warmer.current'?: string + /** current warmer ops + * @alias 'warmer.current' */ wc?: string + /** current warmer ops + * @alias 'warmer.current' */ warmerCurrent?: string + /** current warmer ops */ 'pri.warmer.current'?: string + /** total warmer ops */ 'warmer.total'?: string + /** total warmer ops + * @alias 'warmer.total' */ wto?: string + /** total warmer ops + * @alias 'warmer.total' */ warmerTotal?: string + /** total warmer ops */ 'pri.warmer.total'?: string + /** time spent in warmers */ 'warmer.total_time'?: string + /** time spent in warmers + * @alias 'warmer.total_time' */ wtt?: string + /** time spent in warmers + * @alias 'warmer.total_time' */ warmerTotalTime?: string + /** time spent in warmers */ 'pri.warmer.total_time'?: string + /** number of current suggest ops */ 'suggest.current'?: string + /** number of current suggest ops + * @alias 'suggest.current' */ suc?: string + /** number of current suggest ops + * @alias 'suggest.current' */ suggestCurrent?: string + /** number of current suggest ops */ 'pri.suggest.current'?: string + /** time spend in suggest */ 'suggest.time'?: string + /** time spend in suggest + * @alias 'suggest.time' */ suti?: string + /** time spend in suggest + * @alias 'suggest.time' */ suggestTime?: string + /** time spend in suggest */ 'pri.suggest.time'?: string + /** number of suggest ops */ 'suggest.total'?: string + /** number of suggest ops + * @alias 'suggest.total' */ suto?: string + /** number of suggest ops + * @alias 'suggest.total' */ suggestTotal?: string + /** number of suggest ops */ 'pri.suggest.total'?: string + /** total used memory */ 'memory.total'?: string + /** total used memory + * @alias 'memory.total' */ tm?: string + /** total used memory + * @alias 'memory.total' */ memoryTotal?: string + /** total user memory */ 'pri.memory.total'?: string + /** indicates if the index is search throttled */ 'search.throttled'?: string + /** indicates if the index is search throttled + * @alias 'search.throttled' */ sth?: string + /** number of bulk shard ops */ 'bulk.total_operations'?: string + /** number of bulk shard ops + * @alias 'bulk.total_operations' */ bto?: string + /** number of bulk shard ops 
+ * @alias 'bulk.total_operations' */ bulkTotalOperation?: string + /** number of bulk shard ops */ 'pri.bulk.total_operations'?: string + /** time spend in shard bulk */ 'bulk.total_time'?: string + /** time spend in shard bulk + * @alias 'bulk.total_time' */ btti?: string + /** time spend in shard bulk + * @alias 'bulk.total_time' */ bulkTotalTime?: string + /** time spend in shard bulk */ 'pri.bulk.total_time'?: string + /** total size in bytes of shard bulk */ 'bulk.total_size_in_bytes'?: string + /** total size in bytes of shard bulk + * @alias 'bulk.total_size_in_bytes' */ btsi?: string + /** total size in bytes of shard bulk + * @alias 'bulk.total_size_in_bytes' */ bulkTotalSizeInBytes?: string + /** total size in bytes of shard bulk */ 'pri.bulk.total_size_in_bytes'?: string + /** average time spend in shard bulk */ 'bulk.avg_time'?: string + /** average time spend in shard bulk + * @alias 'bulk.avg_time' */ bati?: string + /** average time spend in shard bulk + * @alias 'bulk.avg_time' */ bulkAvgTime?: string + /** average time spend in shard bulk */ 'pri.bulk.avg_time'?: string + /** average size in bytes of shard bulk */ 'bulk.avg_size_in_bytes'?: string + /** average size in bytes of shard bulk + * @alias 'bulk.avg_size_in_bytes' */ basi?: string + /** average size in bytes of shard bulk + * @alias 'bulk.avg_size_in_bytes' */ bulkAvgSizeInBytes?: string + /** average size in bytes of shard bulk */ 'pri.bulk.avg_size_in_bytes'?: string } export interface CatIndicesRequest extends CatCatRequestBase { -/** Comma-separated list of data streams, indices, and aliases used to limit the request. Supports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`. */ + /** Comma-separated list of data streams, indices, and aliases used to limit the request. + * Supports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`. */ index?: Indices /** The unit used to display byte values. */ bytes?: Bytes @@ -8200,7 +11145,9 @@ export interface CatIndicesRequest extends CatCatRequestBase { master_timeout?: Duration /** List of columns to appear in the response. Supports simple wildcards. */ h?: Names - /** List of columns that determine how the table should be sorted. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name. */ + /** List of columns that determine how the table should be sorted. + * Sorting defaults to ascending and can be changed by setting `:asc` + * or `:desc` as a suffix to the column name. */ s?: Names /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { index?: never, bytes?: never, expand_wildcards?: never, health?: never, include_unloaded_segments?: never, pri?: never, time?: never, master_timeout?: never, h?: never, s?: never } @@ -8211,20 +11158,33 @@ export interface CatIndicesRequest extends CatCatRequestBase { export type CatIndicesResponse = CatIndicesIndicesRecord[] export interface CatMasterMasterRecord { + /** node id */ id?: string + /** host name */ host?: string + /** host name + * @alias host */ h?: string + /** ip address */ ip?: string + /** node name */ node?: string + /** node name + * @alias node */ n?: string } export interface CatMasterRequest extends CatCatRequestBase { -/** List of columns to appear in the response. Supports simple wildcards. */ + /** List of columns to appear in the response. Supports simple wildcards. 
*/ h?: Names - /** List of columns that determine how the table should be sorted. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name. */ + /** List of columns that determine how the table should be sorted. + * Sorting defaults to ascending and can be changed by setting `:asc` + * or `:desc` as a suffix to the column name. */ s?: Names - /** If `true`, the request computes the list of selected nodes from the local cluster state. If `false` the list of selected nodes are computed from the cluster state of the master node. In both cases the coordinating node will send requests for further information to each selected node. */ + /** If `true`, the request computes the list of selected nodes from the + * local cluster state. If `false` the list of selected nodes are computed + * from the cluster state of the master node. In both cases the coordinating + * node will send requests for further information to each selected node. */ local?: boolean /** Period to wait for a connection to the master node. */ master_timeout?: Duration @@ -8237,51 +11197,117 @@ export interface CatMasterRequest extends CatCatRequestBase { export type CatMasterResponse = CatMasterMasterRecord[] export interface CatMlDataFrameAnalyticsDataFrameAnalyticsRecord { + /** The identifier for the job. */ id?: Id + /** The type of analysis that the job performs. */ type?: string + /** The type of analysis that the job performs. + * @alias type */ t?: string + /** The time when the job was created. */ create_time?: string + /** The time when the job was created. + * @alias create_time */ ct?: string + /** The time when the job was created. + * @alias create_time */ createTime?: string + /** The version of Elasticsearch when the job was created. */ version?: VersionString + /** The version of Elasticsearch when the job was created. + * @alias version */ v?: VersionString + /** The name of the source index. */ source_index?: IndexName + /** The name of the source index. + * @alias source_index */ si?: IndexName + /** The name of the source index. + * @alias source_index */ sourceIndex?: IndexName + /** The name of the destination index. */ dest_index?: IndexName + /** The name of the destination index. + * @alias dest_index */ di?: IndexName + /** The name of the destination index. + * @alias dest_index */ destIndex?: IndexName + /** A description of the job. */ description?: string + /** A description of the job. + * @alias description */ d?: string + /** The approximate maximum amount of memory resources that are permitted for the job. */ model_memory_limit?: string + /** The approximate maximum amount of memory resources that are permitted for the job. + * @alias model_memory_limit */ mml?: string + /** The approximate maximum amount of memory resources that are permitted for the job. + * @alias model_memory_limit */ modelMemoryLimit?: string + /** The current status of the job. */ state?: string + /** The current status of the job. + * @alias state */ s?: string + /** Messages about the reason why the job failed. */ failure_reason?: string + /** Messages about the reason why the job failed. + * @alias failure_reason */ fr?: string + /** Messages about the reason why the job failed. + * @alias failure_reason */ failureReason?: string + /** The progress report for the job by phase. */ progress?: string + /** The progress report for the job by phase. + * @alias progress */ p?: string + /** Messages related to the selection of a node. 
*/ assignment_explanation?: string + /** Messages related to the selection of a node. + * @alias assignment_explanation */ ae?: string + /** Messages related to the selection of a node. + * @alias assignment_explanation */ assignmentExplanation?: string + /** The unique identifier of the assigned node. */ 'node.id'?: Id + /** The unique identifier of the assigned node. + * @alias 'node.id' */ ni?: Id + /** The unique identifier of the assigned node. + * @alias 'node.id' */ nodeId?: Id + /** The name of the assigned node. */ 'node.name'?: Name + /** The name of the assigned node. + * @alias 'node.name' */ nn?: Name + /** The name of the assigned node. + * @alias 'node.name' */ nodeName?: Name + /** The ephemeral identifier of the assigned node. */ 'node.ephemeral_id'?: Id + /** The ephemeral identifier of the assigned node. + * @alias 'node.ephemeral_id' */ ne?: Id + /** The ephemeral identifier of the assigned node. + * @alias 'node.ephemeral_id' */ nodeEphemeralId?: Id + /** The network address of the assigned node. */ 'node.address'?: string + /** The network address of the assigned node. + * @alias 'node.address' */ na?: string + /** The network address of the assigned node. + * @alias 'node.address' */ nodeAddress?: string } export interface CatMlDataFrameAnalyticsRequest extends CatCatRequestBase { -/** The ID of the data frame analytics to fetch */ + /** The ID of the data frame analytics to fetch */ id?: Id /** Whether to ignore if a wildcard expression matches no configs. (This includes `_all` string or when no configs have been specified) */ allow_no_match?: boolean @@ -8289,7 +11315,8 @@ export interface CatMlDataFrameAnalyticsRequest extends CatCatRequestBase { bytes?: Bytes /** Comma-separated list of column names to display. */ h?: CatCatDfaColumns - /** Comma-separated list of column names or column aliases used to sort the response. */ + /** Comma-separated list of column names or column aliases used to sort the + * response. */ s?: CatCatDfaColumns /** Unit used to display time values. */ time?: TimeUnit @@ -8302,44 +11329,116 @@ export interface CatMlDataFrameAnalyticsRequest extends CatCatRequestBase { export type CatMlDataFrameAnalyticsResponse = CatMlDataFrameAnalyticsDataFrameAnalyticsRecord[] export interface CatMlDatafeedsDatafeedsRecord { + /** The datafeed identifier. */ id?: string + /** The status of the datafeed. */ state?: MlDatafeedState + /** The status of the datafeed. + * @alias state */ s?: MlDatafeedState + /** For started datafeeds only, contains messages relating to the selection of a node. */ assignment_explanation?: string + /** For started datafeeds only, contains messages relating to the selection of a node. + * @alias assignment_explanation */ ae?: string + /** The number of buckets processed. */ 'buckets.count'?: string + /** The number of buckets processed. + * @alias 'buckets.count' */ bc?: string + /** The number of buckets processed. + * @alias 'buckets.count' */ bucketsCount?: string + /** The number of searches run by the datafeed. */ 'search.count'?: string + /** The number of searches run by the datafeed. + * @alias 'search.count' */ sc?: string + /** The number of searches run by the datafeed. + * @alias 'search.count' */ searchCount?: string + /** The total time the datafeed spent searching, in milliseconds. */ 'search.time'?: string + /** The total time the datafeed spent searching, in milliseconds. + * @alias 'search.time' */ st?: string + /** The total time the datafeed spent searching, in milliseconds. 
+ * @alias 'search.time' */ searchTime?: string + /** The average search time per bucket, in milliseconds. */ 'search.bucket_avg'?: string + /** The average search time per bucket, in milliseconds. + * @alias 'search.bucket_avg' */ sba?: string + /** The average search time per bucket, in milliseconds. + * @alias 'search.bucket_avg' */ searchBucketAvg?: string + /** The exponential average search time per hour, in milliseconds. */ 'search.exp_avg_hour'?: string + /** The exponential average search time per hour, in milliseconds. + * @alias 'search.exp_avg_hour' */ seah?: string + /** The exponential average search time per hour, in milliseconds. + * @alias 'search.exp_avg_hour' */ searchExpAvgHour?: string + /** The unique identifier of the assigned node. + * For started datafeeds only, this information pertains to the node upon which the datafeed is started. */ 'node.id'?: string + /** The unique identifier of the assigned node. + * For started datafeeds only, this information pertains to the node upon which the datafeed is started. + * @alias 'node.id' */ ni?: string + /** The unique identifier of the assigned node. + * For started datafeeds only, this information pertains to the node upon which the datafeed is started. + * @alias 'node.id' */ nodeId?: string + /** The name of the assigned node. + * For started datafeeds only, this information pertains to the node upon which the datafeed is started. */ 'node.name'?: string + /** The name of the assigned node. + * For started datafeeds only, this information pertains to the node upon which the datafeed is started. + * @alias 'node.name' */ nn?: string + /** The name of the assigned node. + * For started datafeeds only, this information pertains to the node upon which the datafeed is started. + * @alias 'node.name' */ nodeName?: string + /** The ephemeral identifier of the assigned node. + * For started datafeeds only, this information pertains to the node upon which the datafeed is started. */ 'node.ephemeral_id'?: string + /** The ephemeral identifier of the assigned node. + * For started datafeeds only, this information pertains to the node upon which the datafeed is started. + * @alias 'node.ephemeral_id' */ ne?: string + /** The ephemeral identifier of the assigned node. + * For started datafeeds only, this information pertains to the node upon which the datafeed is started. + * @alias 'node.ephemeral_id' */ nodeEphemeralId?: string + /** The network address of the assigned node. + * For started datafeeds only, this information pertains to the node upon which the datafeed is started. */ 'node.address'?: string + /** The network address of the assigned node. + * For started datafeeds only, this information pertains to the node upon which the datafeed is started. + * @alias 'node.address' */ na?: string + /** The network address of the assigned node. + * For started datafeeds only, this information pertains to the node upon which the datafeed is started. + * @alias 'node.address' */ nodeAddress?: string } export interface CatMlDatafeedsRequest extends CatCatRequestBase { -/** A numerical character string that uniquely identifies the datafeed. */ + /** A numerical character string that uniquely identifies the datafeed. */ datafeed_id?: Id - /** Specifies what to do when the request: * Contains wildcard expressions and there are no datafeeds that match. * Contains the `_all` string or no identifiers and there are no matches. * Contains wildcard expressions and there are only partial matches. 
If `true`, the API returns an empty datafeeds array when there are no matches and the subset of results when there are partial matches. If `false`, the API returns a 404 status code when there are no matches or only partial matches. */ + /** Specifies what to do when the request: + * + * * Contains wildcard expressions and there are no datafeeds that match. + * * Contains the `_all` string or no identifiers and there are no matches. + * * Contains wildcard expressions and there are only partial matches. + * + * If `true`, the API returns an empty datafeeds array when there are no matches and the subset of results when + * there are partial matches. If `false`, the API returns a 404 status code when there are no matches or only + * partial matches. */ allow_no_match?: boolean /** Comma-separated list of column names to display. */ h?: CatCatDatafeedColumns @@ -8356,186 +11455,551 @@ export interface CatMlDatafeedsRequest extends CatCatRequestBase { export type CatMlDatafeedsResponse = CatMlDatafeedsDatafeedsRecord[] export interface CatMlJobsJobsRecord { + /** The anomaly detection job identifier. */ id?: Id + /** The status of the anomaly detection job. */ state?: MlJobState + /** The status of the anomaly detection job. + * @alias state */ s?: MlJobState + /** For open jobs only, the amount of time the job has been opened. */ opened_time?: string + /** For open jobs only, the amount of time the job has been opened. + * @alias opened_time */ ot?: string + /** For open anomaly detection jobs only, contains messages relating to the selection of a node to run the job. */ assignment_explanation?: string + /** For open anomaly detection jobs only, contains messages relating to the selection of a node to run the job. + * @alias assignment_explanation */ ae?: string + /** The number of input documents that have been processed by the anomaly detection job. + * This value includes documents with missing fields, since they are nonetheless analyzed. + * If you use datafeeds and have aggregations in your search query, the `processed_record_count` is the number of aggregation results processed, not the number of Elasticsearch documents. */ 'data.processed_records'?: string + /** The number of input documents that have been processed by the anomaly detection job. + * This value includes documents with missing fields, since they are nonetheless analyzed. + * If you use datafeeds and have aggregations in your search query, the `processed_record_count` is the number of aggregation results processed, not the number of Elasticsearch documents. + * @alias 'data.processed_records' */ dpr?: string + /** The number of input documents that have been processed by the anomaly detection job. + * This value includes documents with missing fields, since they are nonetheless analyzed. + * If you use datafeeds and have aggregations in your search query, the `processed_record_count` is the number of aggregation results processed, not the number of Elasticsearch documents. + * @alias 'data.processed_records' */ dataProcessedRecords?: string + /** The total number of fields in all the documents that have been processed by the anomaly detection job. + * Only fields that are specified in the detector configuration object contribute to this count. + * The timestamp is not included in this count. */ 'data.processed_fields'?: string + /** The total number of fields in all the documents that have been processed by the anomaly detection job. 
+ * Only fields that are specified in the detector configuration object contribute to this count. + * The timestamp is not included in this count. + * @alias 'data.processed_fields' */ dpf?: string + /** The total number of fields in all the documents that have been processed by the anomaly detection job. + * Only fields that are specified in the detector configuration object contribute to this count. + * The timestamp is not included in this count. + * @alias 'data.processed_fields' */ dataProcessedFields?: string + /** The number of bytes of input data posted to the anomaly detection job. */ 'data.input_bytes'?: ByteSize + /** The number of bytes of input data posted to the anomaly detection job. + * @alias 'data.input_bytes' */ dib?: ByteSize + /** The number of bytes of input data posted to the anomaly detection job. + * @alias 'data.input_bytes' */ dataInputBytes?: ByteSize + /** The number of input documents posted to the anomaly detection job. */ 'data.input_records'?: string + /** The number of input documents posted to the anomaly detection job. + * @alias 'data.input_records' */ dir?: string + /** The number of input documents posted to the anomaly detection job. + * @alias 'data.input_records' */ dataInputRecords?: string + /** The total number of fields in input documents posted to the anomaly detection job. + * This count includes fields that are not used in the analysis. + * However, be aware that if you are using a datafeed, it extracts only the required fields from the documents it retrieves before posting them to the job. */ 'data.input_fields'?: string + /** The total number of fields in input documents posted to the anomaly detection job. + * This count includes fields that are not used in the analysis. + * However, be aware that if you are using a datafeed, it extracts only the required fields from the documents it retrieves before posting them to the job. + * @alias 'data.input_fields' */ dif?: string + /** The total number of fields in input documents posted to the anomaly detection job. + * This count includes fields that are not used in the analysis. + * However, be aware that if you are using a datafeed, it extracts only the required fields from the documents it retrieves before posting them to the job. + * @alias 'data.input_fields' */ dataInputFields?: string + /** The number of input documents with either a missing date field or a date that could not be parsed. */ 'data.invalid_dates'?: string + /** The number of input documents with either a missing date field or a date that could not be parsed. + * @alias 'data.invalid_dates' */ did?: string + /** The number of input documents with either a missing date field or a date that could not be parsed. + * @alias 'data.invalid_dates' */ dataInvalidDates?: string + /** The number of input documents that are missing a field that the anomaly detection job is configured to analyze. + * Input documents with missing fields are still processed because it is possible that not all fields are missing. + * If you are using datafeeds or posting data to the job in JSON format, a high `missing_field_count` is often not an indication of data issues. + * It is not necessarily a cause for concern. */ 'data.missing_fields'?: string + /** The number of input documents that are missing a field that the anomaly detection job is configured to analyze. + * Input documents with missing fields are still processed because it is possible that not all fields are missing. 
+ * If you are using datafeeds or posting data to the job in JSON format, a high `missing_field_count` is often not an indication of data issues. + * It is not necessarily a cause for concern. + * @alias 'data.missing_fields' */ dmf?: string + /** The number of input documents that are missing a field that the anomaly detection job is configured to analyze. + * Input documents with missing fields are still processed because it is possible that not all fields are missing. + * If you are using datafeeds or posting data to the job in JSON format, a high `missing_field_count` is often not an indication of data issues. + * It is not necessarily a cause for concern. + * @alias 'data.missing_fields' */ dataMissingFields?: string + /** The number of input documents that have a timestamp chronologically preceding the start of the current anomaly detection bucket offset by the latency window. + * This information is applicable only when you provide data to the anomaly detection job by using the post data API. + * These out of order documents are discarded, since jobs require time series data to be in ascending chronological order. */ 'data.out_of_order_timestamps'?: string + /** The number of input documents that have a timestamp chronologically preceding the start of the current anomaly detection bucket offset by the latency window. + * This information is applicable only when you provide data to the anomaly detection job by using the post data API. + * These out of order documents are discarded, since jobs require time series data to be in ascending chronological order. + * @alias 'data.out_of_order_timestamps' */ doot?: string + /** The number of input documents that have a timestamp chronologically preceding the start of the current anomaly detection bucket offset by the latency window. + * This information is applicable only when you provide data to the anomaly detection job by using the post data API. + * These out of order documents are discarded, since jobs require time series data to be in ascending chronological order. + * @alias 'data.out_of_order_timestamps' */ dataOutOfOrderTimestamps?: string + /** The number of buckets which did not contain any data. + * If your data contains many empty buckets, consider increasing your `bucket_span` or using functions that are tolerant to gaps in data such as mean, `non_null_sum` or `non_zero_count`. */ 'data.empty_buckets'?: string + /** The number of buckets which did not contain any data. + * If your data contains many empty buckets, consider increasing your `bucket_span` or using functions that are tolerant to gaps in data such as mean, `non_null_sum` or `non_zero_count`. + * @alias 'data.empty_buckets' */ deb?: string + /** The number of buckets which did not contain any data. + * If your data contains many empty buckets, consider increasing your `bucket_span` or using functions that are tolerant to gaps in data such as mean, `non_null_sum` or `non_zero_count`. + * @alias 'data.empty_buckets' */ dataEmptyBuckets?: string + /** The number of buckets that contained few data points compared to the expected number of data points. + * If your data contains many sparse buckets, consider using a longer `bucket_span`. */ 'data.sparse_buckets'?: string + /** The number of buckets that contained few data points compared to the expected number of data points. + * If your data contains many sparse buckets, consider using a longer `bucket_span`. 
+ * @alias 'data.sparse_buckets' */ dsb?: string + /** The number of buckets that contained few data points compared to the expected number of data points. + * If your data contains many sparse buckets, consider using a longer `bucket_span`. + * @alias 'data.sparse_buckets' */ dataSparseBuckets?: string + /** The total number of buckets processed. */ 'data.buckets'?: string + /** The total number of buckets processed. + * @alias 'data.buckets' */ db?: string + /** The total number of buckets processed. + * @alias 'data.buckets' */ dataBuckets?: string + /** The timestamp of the earliest chronologically input document. */ 'data.earliest_record'?: string + /** The timestamp of the earliest chronologically input document. + * @alias 'data.earliest_record' */ der?: string + /** The timestamp of the earliest chronologically input document. + * @alias 'data.earliest_record' */ dataEarliestRecord?: string + /** The timestamp of the latest chronologically input document. */ 'data.latest_record'?: string + /** The timestamp of the latest chronologically input document. + * @alias 'data.latest_record' */ dlr?: string + /** The timestamp of the latest chronologically input document. + * @alias 'data.latest_record' */ dataLatestRecord?: string + /** The timestamp at which data was last analyzed, according to server time. */ 'data.last'?: string + /** The timestamp at which data was last analyzed, according to server time. + * @alias 'data.last' */ dl?: string + /** The timestamp at which data was last analyzed, according to server time. + * @alias 'data.last' */ dataLast?: string + /** The timestamp of the last bucket that did not contain any data. */ 'data.last_empty_bucket'?: string + /** The timestamp of the last bucket that did not contain any data. + * @alias 'data.last_empty_bucket' */ dleb?: string + /** The timestamp of the last bucket that did not contain any data. + * @alias 'data.last_empty_bucket' */ dataLastEmptyBucket?: string + /** The timestamp of the last bucket that was considered sparse. */ 'data.last_sparse_bucket'?: string + /** The timestamp of the last bucket that was considered sparse. + * @alias 'data.last_sparse_bucket' */ dlsb?: string + /** The timestamp of the last bucket that was considered sparse. + * @alias 'data.last_sparse_bucket' */ dataLastSparseBucket?: string + /** The number of bytes of memory used by the models. + * This is the maximum value since the last time the model was persisted. + * If the job is closed, this value indicates the latest size. */ 'model.bytes'?: ByteSize + /** The number of bytes of memory used by the models. + * This is the maximum value since the last time the model was persisted. + * If the job is closed, this value indicates the latest size. + * @alias 'model.bytes' */ mb?: ByteSize + /** The number of bytes of memory used by the models. + * This is the maximum value since the last time the model was persisted. + * If the job is closed, this value indicates the latest size. + * @alias 'model.bytes' */ modelBytes?: ByteSize + /** The status of the mathematical models. */ 'model.memory_status'?: MlMemoryStatus + /** The status of the mathematical models. + * @alias 'model.memory_status' */ mms?: MlMemoryStatus + /** The status of the mathematical models. + * @alias 'model.memory_status' */ modelMemoryStatus?: MlMemoryStatus + /** The number of bytes over the high limit for memory usage at the last allocation failure. 
*/ 'model.bytes_exceeded'?: ByteSize + /** The number of bytes over the high limit for memory usage at the last allocation failure. + * @alias 'model.bytes_exceeded' */ mbe?: ByteSize + /** The number of bytes over the high limit for memory usage at the last allocation failure. + * @alias 'model.bytes_exceeded' */ modelBytesExceeded?: ByteSize + /** The upper limit for model memory usage, checked on increasing values. */ 'model.memory_limit'?: string + /** The upper limit for model memory usage, checked on increasing values. + * @alias 'model.memory_limit' */ mml?: string + /** The upper limit for model memory usage, checked on increasing values. + * @alias 'model.memory_limit' */ modelMemoryLimit?: string + /** The number of `by` field values that were analyzed by the models. + * This value is cumulative for all detectors in the job. */ 'model.by_fields'?: string + /** The number of `by` field values that were analyzed by the models. + * This value is cumulative for all detectors in the job. + * @alias 'model.by_fields' */ mbf?: string + /** The number of `by` field values that were analyzed by the models. + * This value is cumulative for all detectors in the job. + * @alias 'model.by_fields' */ modelByFields?: string + /** The number of `over` field values that were analyzed by the models. + * This value is cumulative for all detectors in the job. */ 'model.over_fields'?: string + /** The number of `over` field values that were analyzed by the models. + * This value is cumulative for all detectors in the job. + * @alias 'model.over_fields' */ mof?: string + /** The number of `over` field values that were analyzed by the models. + * This value is cumulative for all detectors in the job. + * @alias 'model.over_fields' */ modelOverFields?: string + /** The number of `partition` field values that were analyzed by the models. + * This value is cumulative for all detectors in the job. */ 'model.partition_fields'?: string + /** The number of `partition` field values that were analyzed by the models. + * This value is cumulative for all detectors in the job. + * @alias 'model.partition_fields' */ mpf?: string + /** The number of `partition` field values that were analyzed by the models. + * This value is cumulative for all detectors in the job. + * @alias 'model.partition_fields' */ modelPartitionFields?: string + /** The number of buckets for which new entities in incoming data were not processed due to insufficient model memory. + * This situation is also signified by a `hard_limit: memory_status` property value. */ 'model.bucket_allocation_failures'?: string + /** The number of buckets for which new entities in incoming data were not processed due to insufficient model memory. + * This situation is also signified by a `hard_limit: memory_status` property value. + * @alias 'model.bucket_allocation_failures' */ mbaf?: string + /** The number of buckets for which new entities in incoming data were not processed due to insufficient model memory. + * This situation is also signified by a `hard_limit: memory_status` property value. + * @alias 'model.bucket_allocation_failures' */ modelBucketAllocationFailures?: string + /** The status of categorization for the job. */ 'model.categorization_status'?: MlCategorizationStatus + /** The status of categorization for the job. + * @alias 'model.categorization_status' */ mcs?: MlCategorizationStatus + /** The status of categorization for the job. 
+ * @alias 'model.categorization_status' */ modelCategorizationStatus?: MlCategorizationStatus + /** The number of documents that have had a field categorized. */ 'model.categorized_doc_count'?: string + /** The number of documents that have had a field categorized. + * @alias 'model.categorized_doc_count' */ mcdc?: string + /** The number of documents that have had a field categorized. + * @alias 'model.categorized_doc_count' */ modelCategorizedDocCount?: string + /** The number of categories created by categorization. */ 'model.total_category_count'?: string + /** The number of categories created by categorization. + * @alias 'model.total_category_count' */ mtcc?: string + /** The number of categories created by categorization. + * @alias 'model.total_category_count' */ modelTotalCategoryCount?: string + /** The number of categories that match more than 1% of categorized documents. */ 'model.frequent_category_count'?: string + /** The number of categories that match more than 1% of categorized documents. + * @alias 'model.frequent_category_count' */ modelFrequentCategoryCount?: string + /** The number of categories that match just one categorized document. */ 'model.rare_category_count'?: string + /** The number of categories that match just one categorized document. + * @alias 'model.rare_category_count' */ mrcc?: string + /** The number of categories that match just one categorized document. + * @alias 'model.rare_category_count' */ modelRareCategoryCount?: string + /** The number of categories created by categorization that will never be assigned again because another category’s definition makes it a superset of the dead category. + * Dead categories are a side effect of the way categorization has no prior training. */ 'model.dead_category_count'?: string + /** The number of categories created by categorization that will never be assigned again because another category’s definition makes it a superset of the dead category. + * Dead categories are a side effect of the way categorization has no prior training. + * @alias 'model.dead_category_count' */ mdcc?: string + /** The number of categories created by categorization that will never be assigned again because another category’s definition makes it a superset of the dead category. + * Dead categories are a side effect of the way categorization has no prior training. + * @alias 'model.dead_category_count' */ modelDeadCategoryCount?: string + /** The number of times that categorization wanted to create a new category but couldn’t because the job had hit its `model_memory_limit`. + * This count does not track which specific categories failed to be created. + * Therefore you cannot use this value to determine the number of unique categories that were missed. */ 'model.failed_category_count'?: string + /** The number of times that categorization wanted to create a new category but couldn’t because the job had hit its `model_memory_limit`. + * This count does not track which specific categories failed to be created. + * Therefore you cannot use this value to determine the number of unique categories that were missed. + * @alias 'model.failed_category_count' */ mfcc?: string + /** The number of times that categorization wanted to create a new category but couldn’t because the job had hit its `model_memory_limit`. + * This count does not track which specific categories failed to be created. + * Therefore you cannot use this value to determine the number of unique categories that were missed. 
+ * @alias 'model.failed_category_count' */ modelFailedCategoryCount?: string + /** The timestamp when the model stats were gathered, according to server time. */ 'model.log_time'?: string + /** The timestamp when the model stats were gathered, according to server time. + * @alias 'model.log_time' */ mlt?: string + /** The timestamp when the model stats were gathered, according to server time. + * @alias 'model.log_time' */ modelLogTime?: string + /** The timestamp of the last record when the model stats were gathered. */ 'model.timestamp'?: string + /** The timestamp of the last record when the model stats were gathered. + * @alias 'model.timestamp' */ mt?: string + /** The timestamp of the last record when the model stats were gathered. + * @alias 'model.timestamp' */ modelTimestamp?: string + /** The number of individual forecasts currently available for the job. + * A value of one or more indicates that forecasts exist. */ 'forecasts.total'?: string + /** The number of individual forecasts currently available for the job. + * A value of one or more indicates that forecasts exist. + * @alias 'forecasts.total' */ ft?: string + /** The number of individual forecasts currently available for the job. + * A value of one or more indicates that forecasts exist. + * @alias 'forecasts.total' */ forecastsTotal?: string + /** The minimum memory usage in bytes for forecasts related to the anomaly detection job. */ 'forecasts.memory.min'?: string + /** The minimum memory usage in bytes for forecasts related to the anomaly detection job. + * @alias 'forecasts.memory.min' */ fmmin?: string + /** The minimum memory usage in bytes for forecasts related to the anomaly detection job. + * @alias 'forecasts.memory.min' */ forecastsMemoryMin?: string + /** The maximum memory usage in bytes for forecasts related to the anomaly detection job. */ 'forecasts.memory.max'?: string + /** The maximum memory usage in bytes for forecasts related to the anomaly detection job. + * @alias 'forecasts.memory.max' */ fmmax?: string + /** The maximum memory usage in bytes for forecasts related to the anomaly detection job. + * @alias 'forecasts.memory.max' */ forecastsMemoryMax?: string + /** The average memory usage in bytes for forecasts related to the anomaly detection job. */ 'forecasts.memory.avg'?: string + /** The average memory usage in bytes for forecasts related to the anomaly detection job. + * @alias 'forecasts.memory.avg' */ fmavg?: string + /** The average memory usage in bytes for forecasts related to the anomaly detection job. + * @alias 'forecasts.memory.avg' */ forecastsMemoryAvg?: string + /** The total memory usage in bytes for forecasts related to the anomaly detection job. */ 'forecasts.memory.total'?: string + /** The total memory usage in bytes for forecasts related to the anomaly detection job. + * @alias 'forecasts.memory.total' */ fmt?: string + /** The total memory usage in bytes for forecasts related to the anomaly detection job. + * @alias 'forecasts.memory.total' */ forecastsMemoryTotal?: string + /** The minimum number of `model_forecast` documents written for forecasts related to the anomaly detection job. */ 'forecasts.records.min'?: string + /** The minimum number of `model_forecast` documents written for forecasts related to the anomaly detection job. + * @alias 'forecasts.records.min' */ frmin?: string + /** The minimum number of `model_forecast` documents written for forecasts related to the anomaly detection job. 
+ * @alias 'forecasts.records.min' */ forecastsRecordsMin?: string + /** The maximum number of `model_forecast` documents written for forecasts related to the anomaly detection job. */ 'forecasts.records.max'?: string + /** The maximum number of `model_forecast` documents written for forecasts related to the anomaly detection job. + * @alias 'forecasts.records.max' */ frmax?: string + /** The maximum number of `model_forecast` documents written for forecasts related to the anomaly detection job. + * @alias 'forecasts.records.max' */ forecastsRecordsMax?: string + /** The average number of `model_forecast` documents written for forecasts related to the anomaly detection job. */ 'forecasts.records.avg'?: string + /** The average number of `model_forecast` documents written for forecasts related to the anomaly detection job. + * @alias 'forecasts.records.avg' */ fravg?: string + /** The average number of `model_forecast` documents written for forecasts related to the anomaly detection job. + * @alias 'forecasts.records.avg' */ forecastsRecordsAvg?: string + /** The total number of `model_forecast` documents written for forecasts related to the anomaly detection job. */ 'forecasts.records.total'?: string + /** The total number of `model_forecast` documents written for forecasts related to the anomaly detection job. + * @alias 'forecasts.records.total' */ frt?: string + /** The total number of `model_forecast` documents written for forecasts related to the anomaly detection job. + * @alias 'forecasts.records.total' */ forecastsRecordsTotal?: string + /** The minimum runtime in milliseconds for forecasts related to the anomaly detection job. */ 'forecasts.time.min'?: string + /** The minimum runtime in milliseconds for forecasts related to the anomaly detection job. + * @alias 'forecasts.time.min' */ ftmin?: string + /** The minimum runtime in milliseconds for forecasts related to the anomaly detection job. + * @alias 'forecasts.time.min' */ forecastsTimeMin?: string + /** The maximum runtime in milliseconds for forecasts related to the anomaly detection job. */ 'forecasts.time.max'?: string + /** The maximum runtime in milliseconds for forecasts related to the anomaly detection job. + * @alias 'forecasts.time.max' */ ftmax?: string + /** The maximum runtime in milliseconds for forecasts related to the anomaly detection job. + * @alias 'forecasts.time.max' */ forecastsTimeMax?: string + /** The average runtime in milliseconds for forecasts related to the anomaly detection job. */ 'forecasts.time.avg'?: string + /** The average runtime in milliseconds for forecasts related to the anomaly detection job. + * @alias 'forecasts.time.avg' */ ftavg?: string + /** The average runtime in milliseconds for forecasts related to the anomaly detection job. + * @alias 'forecasts.time.avg' */ forecastsTimeAvg?: string + /** The total runtime in milliseconds for forecasts related to the anomaly detection job. */ 'forecasts.time.total'?: string + /** The total runtime in milliseconds for forecasts related to the anomaly detection job. + * @alias 'forecasts.time.total' */ ftt?: string + /** The total runtime in milliseconds for forecasts related to the anomaly detection job. + * @alias 'forecasts.time.total' */ forecastsTimeTotal?: string + /** The unique identifier of the assigned node. */ 'node.id'?: NodeId + /** The unique identifier of the assigned node. + * @alias 'node.id' */ ni?: NodeId + /** The unique identifier of the assigned node.
+ * @alias 'node.id' */ nodeId?: NodeId + /** The name of the assigned node. */ 'node.name'?: string + /** The name of the assigned node. + * @alias 'node.name' */ nn?: string + /** The name of the assigned node. + * @alias 'node.name' */ nodeName?: string + /** The ephemeral identifier of the assigned node. */ 'node.ephemeral_id'?: NodeId + /** The ephemeral identifier of the assigned node. + * @alias 'node.ephemeral_id' */ ne?: NodeId + /** The ephemeral identifier of the assigned node. + * @alias 'node.ephemeral_id' */ nodeEphemeralId?: NodeId + /** The network address of the assigned node. */ 'node.address'?: string + /** The network address of the assigned node. + * @alias 'node.address' */ na?: string + /** The network address of the assigned node. + * @alias 'node.address' */ nodeAddress?: string + /** The number of bucket results produced by the job. */ 'buckets.count'?: string + /** The number of bucket results produced by the job. + * @alias 'buckets.count' */ bc?: string + /** The number of bucket results produced by the job. + * @alias 'buckets.count' */ bucketsCount?: string + /** The sum of all bucket processing times, in milliseconds. */ 'buckets.time.total'?: string + /** The sum of all bucket processing times, in milliseconds. + * @alias 'buckets.time.total' */ btt?: string + /** The sum of all bucket processing times, in milliseconds. + * @alias 'buckets.time.total' */ bucketsTimeTotal?: string + /** The minimum of all bucket processing times, in milliseconds. */ 'buckets.time.min'?: string + /** The minimum of all bucket processing times, in milliseconds. + * @alias 'buckets.time.min' */ btmin?: string + /** The minimum of all bucket processing times, in milliseconds. + * @alias 'buckets.time.min' */ bucketsTimeMin?: string + /** The maximum of all bucket processing times, in milliseconds. */ 'buckets.time.max'?: string + /** The maximum of all bucket processing times, in milliseconds. + * @alias 'buckets.time.max' */ btmax?: string + /** The maximum of all bucket processing times, in milliseconds. + * @alias 'buckets.time.max' */ bucketsTimeMax?: string + /** The exponential moving average of all bucket processing times, in milliseconds. */ 'buckets.time.exp_avg'?: string + /** The exponential moving average of all bucket processing times, in milliseconds. + * @alias 'buckets.time.exp_avg' */ btea?: string + /** The exponential moving average of all bucket processing times, in milliseconds. + * @alias 'buckets.time.exp_avg' */ bucketsTimeExpAvg?: string + /** The exponential moving average of bucket processing times calculated in a one hour time window, in milliseconds. */ 'buckets.time.exp_avg_hour'?: string + /** The exponential moving average of bucket processing times calculated in a one hour time window, in milliseconds. + * @alias 'buckets.time.exp_avg_hour' */ bteah?: string + /** The exponential moving average of bucket processing times calculated in a one hour time window, in milliseconds. + * @alias 'buckets.time.exp_avg_hour' */ bucketsTimeExpAvgHour?: string } export interface CatMlJobsRequest extends CatCatRequestBase { -/** Identifier for the anomaly detection job. */ + /** Identifier for the anomaly detection job. */ job_id?: Id - /** Specifies what to do when the request: * Contains wildcard expressions and there are no jobs that match. * Contains the `_all` string or no identifiers and there are no matches. * Contains wildcard expressions and there are only partial matches. 
If `true`, the API returns an empty jobs array when there are no matches and the subset of results when there are partial matches. If `false`, the API returns a 404 status code when there are no matches or only partial matches. */ + /** Specifies what to do when the request: + * + * * Contains wildcard expressions and there are no jobs that match. + * * Contains the `_all` string or no identifiers and there are no matches. + * * Contains wildcard expressions and there are only partial matches. + * + * If `true`, the API returns an empty jobs array when there are no matches and the subset of results when there + * are partial matches. If `false`, the API returns a 404 status code when there are no matches or only partial + * matches. */ allow_no_match?: boolean /** The unit used to display byte values. */ bytes?: Bytes @@ -8554,9 +12018,11 @@ export interface CatMlJobsRequest extends CatCatRequestBase { export type CatMlJobsResponse = CatMlJobsJobsRecord[] export interface CatMlTrainedModelsRequest extends CatCatRequestBase { -/** A unique identifier for the trained model. */ + /** A unique identifier for the trained model. */ model_id?: Id - /** Specifies what to do when the request: contains wildcard expressions and there are no models that match; contains the `_all` string or no identifiers and there are no matches; contains wildcard expressions and there are only partial matches. If `true`, the API returns an empty array when there are no matches and the subset of results when there are partial matches. If `false`, the API returns a 404 status code when there are no matches or only partial matches. */ + /** Specifies what to do when the request: contains wildcard expressions and there are no models that match; contains the `_all` string or no identifiers and there are no matches; contains wildcard expressions and there are only partial matches. + * If `true`, the API returns an empty array when there are no matches and the subset of results when there are partial matches. + * If `false`, the API returns a 404 status code when there are no matches or only partial matches. */ allow_no_match?: boolean /** The unit used to display byte values. */ bytes?: Bytes @@ -8579,73 +12045,169 @@ export interface CatMlTrainedModelsRequest extends CatCatRequestBase { export type CatMlTrainedModelsResponse = CatMlTrainedModelsTrainedModelsRecord[] export interface CatMlTrainedModelsTrainedModelsRecord { + /** The model identifier. */ id?: Id + /** Information about the creator of the model. */ created_by?: string + /** Information about the creator of the model. + * @alias created_by */ c?: string + /** Information about the creator of the model. + * @alias created_by */ createdBy?: string + /** The estimated heap size to keep the model in memory. */ heap_size?: ByteSize + /** The estimated heap size to keep the model in memory. + * @alias heap_size */ hs?: ByteSize + /** The estimated heap size to keep the model in memory. + * @alias heap_size */ modelHeapSize?: ByteSize + /** The estimated number of operations to use the model. + * This number helps to measure the computational complexity of the model. */ operations?: string + /** The estimated number of operations to use the model. + * This number helps to measure the computational complexity of the model. + * @alias operations */ o?: string + /** The estimated number of operations to use the model. + * This number helps to measure the computational complexity of the model. 
+ * @alias operations */ modelOperations?: string + /** The license level of the model. */ license?: string + /** The license level of the model. + * @alias license */ l?: string + /** The time the model was created. */ create_time?: DateTime + /** The time the model was created. + * @alias create_time */ ct?: DateTime + /** The version of Elasticsearch when the model was created. */ version?: VersionString + /** The version of Elasticsearch when the model was created. + * @alias version */ v?: VersionString + /** A description of the model. */ description?: string + /** A description of the model. + * @alias description */ d?: string + /** The number of pipelines that are referencing the model. */ 'ingest.pipelines'?: string + /** The number of pipelines that are referencing the model. + * @alias 'ingest.pipelines' */ ip?: string + /** The number of pipelines that are referencing the model. + * @alias 'ingest.pipelines' */ ingestPipelines?: string + /** The total number of documents that are processed by the model. */ 'ingest.count'?: string + /** The total number of documents that are processed by the model. + * @alias 'ingest.count' */ ic?: string + /** The total number of documents that are processed by the model. + * @alias 'ingest.count' */ ingestCount?: string + /** The total time spent processing documents with this model. */ 'ingest.time'?: string + /** The total time spent processing documents with this model. + * @alias 'ingest.time' */ it?: string + /** The total time spent processing documents with this model. + * @alias 'ingest.time' */ ingestTime?: string + /** The total number of documents that are currently being handled by the model. */ 'ingest.current'?: string + /** The total number of documents that are currently being handled by the model. + * @alias 'ingest.current' */ icurr?: string + /** The total number of documents that are currently being handled by the model. + * @alias 'ingest.current' */ ingestCurrent?: string + /** The total number of failed ingest attempts with the model. */ 'ingest.failed'?: string + /** The total number of failed ingest attempts with the model. + * @alias 'ingest.failed' */ if?: string + /** The total number of failed ingest attempts with the model. + * @alias 'ingest.failed' */ ingestFailed?: string + /** The identifier for the data frame analytics job that created the model. + * Only displayed if the job is still available. */ 'data_frame.id'?: string + /** The identifier for the data frame analytics job that created the model. + * Only displayed if the job is still available. + * @alias 'data_frame.id' */ dfid?: string + /** The identifier for the data frame analytics job that created the model. + * Only displayed if the job is still available. + * @alias 'data_frame.id' */ dataFrameAnalytics?: string + /** The time the data frame analytics job was created. */ 'data_frame.create_time'?: string + /** The time the data frame analytics job was created. + * @alias 'data_frame.create_time' */ dft?: string + /** The time the data frame analytics job was created. + * @alias 'data_frame.create_time' */ dataFrameAnalyticsTime?: string + /** The source index used to train in the data frame analysis. */ 'data_frame.source_index'?: string + /** The source index used to train in the data frame analysis. + * @alias 'data_frame.source_index' */ dfsi?: string + /** The source index used to train in the data frame analysis.
+ * @alias 'data_frame.source_index' */ dataFrameAnalyticsSrcIndex?: string + /** The analysis used by the data frame to build the model. */ 'data_frame.analysis'?: string + /** The analysis used by the data frame to build the model. + * @alias 'data_frame.analysis' */ dfa?: string + /** The analysis used by the data frame to build the model. + * @alias 'data_frame.analysis' */ dataFrameAnalyticsAnalysis?: string type?: string } export interface CatNodeattrsNodeAttributesRecord { + /** The node name. */ node?: string + /** The unique node identifier. */ id?: string + /** The process identifier. */ pid?: string + /** The host name. */ host?: string + /** The host name. + * @alias host */ h?: string + /** The IP address. */ ip?: string + /** The IP address. + * @alias ip */ i?: string + /** The bound transport port. */ port?: string + /** The attribute name. */ attr?: string + /** The attribute value. */ value?: string } export interface CatNodeattrsRequest extends CatCatRequestBase { -/** List of columns to appear in the response. Supports simple wildcards. */ + /** List of columns to appear in the response. Supports simple wildcards. */ h?: Names - /** List of columns that determine how the table should be sorted. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name. */ + /** List of columns that determine how the table should be sorted. + * Sorting defaults to ascending and can be changed by setting `:asc` + * or `:desc` as a suffix to the column name. */ s?: Names - /** If `true`, the request computes the list of selected nodes from the local cluster state. If `false` the list of selected nodes are computed from the cluster state of the master node. In both cases the coordinating node will send requests for further information to each selected node. */ + /** If `true`, the request computes the list of selected nodes from the + * local cluster state. If `false` the list of selected nodes are computed + * from the cluster state of the master node. In both cases the coordinating + * node will send requests for further information to each selected node. */ local?: boolean /** Period to wait for a connection to the master node. */ master_timeout?: Duration @@ -8658,277 +12220,721 @@ export interface CatNodeattrsRequest extends CatCatRequestBase { export type CatNodeattrsResponse = CatNodeattrsNodeAttributesRecord[] export interface CatNodesNodesRecord { + /** The unique node identifier. */ id?: Id + /** The unique node identifier. + * @alias id */ nodeId?: Id + /** The process identifier. */ pid?: string + /** The process identifier. + * @alias pid */ p?: string + /** The IP address. */ ip?: string + /** The IP address. + * @alias ip */ i?: string + /** The bound transport port. */ port?: string + /** The bound transport port. + * @alias port */ po?: string + /** The bound HTTP address. */ http_address?: string + /** The bound HTTP address. + * @alias http_address */ http?: string + /** The Elasticsearch version. */ version?: VersionString + /** The Elasticsearch version. + * @alias version */ v?: VersionString + /** The Elasticsearch distribution flavor. */ flavor?: string + /** The Elasticsearch distribution flavor. + * @alias flavor */ f?: string + /** The Elasticsearch distribution type. */ type?: string + /** The Elasticsearch distribution type. + * @alias type */ t?: string + /** The Elasticsearch build hash. */ build?: string + /** The Elasticsearch build hash. + * @alias build */ b?: string + /** The Java version. 
*/ jdk?: string + /** The Java version. + * @alias jdk */ j?: string + /** The total disk space. */ 'disk.total'?: ByteSize + /** The total disk space. + * @alias 'disk.total' */ dt?: ByteSize + /** The total disk space. + * @alias 'disk.total' */ diskTotal?: ByteSize + /** The used disk space. */ 'disk.used'?: ByteSize + /** The used disk space. + * @alias 'disk.used' */ du?: ByteSize + /** The used disk space. + * @alias 'disk.used' */ diskUsed?: ByteSize + /** The available disk space. */ 'disk.avail'?: ByteSize + /** The available disk space. + * @alias 'disk.avail' */ d?: ByteSize + /** The available disk space. + * @alias 'disk.avail' */ da?: ByteSize + /** The available disk space. + * @alias 'disk.avail' */ disk?: ByteSize + /** The available disk space. + * @alias 'disk.avail' */ diskAvail?: ByteSize + /** The used disk space percentage. */ 'disk.used_percent'?: Percentage + /** The used disk space percentage. + * @alias 'disk.used_percent' */ dup?: Percentage + /** The used disk space percentage. + * @alias 'disk.used_percent' */ diskUsedPercent?: Percentage + /** The used heap. */ 'heap.current'?: string + /** The used heap. + * @alias 'heap.current' */ hc?: string + /** The used heap. + * @alias 'heap.current' */ heapCurrent?: string + /** The used heap ratio. */ 'heap.percent'?: Percentage + /** The used heap ratio. + * @alias 'heap.percent' */ hp?: Percentage + /** The used heap ratio. + * @alias 'heap.percent' */ heapPercent?: Percentage + /** The maximum configured heap. */ 'heap.max'?: string + /** The maximum configured heap. + * @alias 'heap.max' */ hm?: string + /** The maximum configured heap. + * @alias 'heap.max' */ heapMax?: string + /** The used machine memory. */ 'ram.current'?: string + /** The used machine memory. + * @alias 'ram.current' */ rc?: string + /** The used machine memory. + * @alias 'ram.current' */ ramCurrent?: string + /** The used machine memory ratio. */ 'ram.percent'?: Percentage + /** The used machine memory ratio. + * @alias 'ram.percent' */ rp?: Percentage + /** The used machine memory ratio. + * @alias 'ram.percent' */ ramPercent?: Percentage + /** The total machine memory. */ 'ram.max'?: string + /** The total machine memory. + * @alias 'ram.max' */ rn?: string + /** The total machine memory. + * @alias 'ram.max' */ ramMax?: string + /** The used file descriptors. */ 'file_desc.current'?: string + /** The used file descriptors. + * @alias 'file_desc.current' */ fdc?: string + /** The used file descriptors. + * @alias 'file_desc.current' */ fileDescriptorCurrent?: string + /** The used file descriptor ratio. */ 'file_desc.percent'?: Percentage + /** The used file descriptor ratio. + * @alias 'file_desc.percent' */ fdp?: Percentage + /** The used file descriptor ratio. + * @alias 'file_desc.percent' */ fileDescriptorPercent?: Percentage + /** The maximum number of file descriptors. */ 'file_desc.max'?: string + /** The maximum number of file descriptors. + * @alias 'file_desc.max' */ fdm?: string + /** The maximum number of file descriptors. + * @alias 'file_desc.max' */ fileDescriptorMax?: string + /** The recent system CPU usage as a percentage. */ cpu?: string + /** The load average for the most recent minute. */ load_1m?: string + /** The load average for the last five minutes. */ load_5m?: string + /** The load average for the last fifteen minutes. */ load_15m?: string + /** The load average for the last fifteen minutes. + * @alias load_15m */ l?: string + /** The node uptime. */ uptime?: string + /** The node uptime. 
+ * @alias uptime */ u?: string + /** The roles of the node. + * Returned values include `c`(cold node), `d`(data node), `f`(frozen node), `h`(hot node), `i`(ingest node), `l`(machine learning node), `m` (master eligible node), `r`(remote cluster client node), `s`(content node), `t`(transform node), `v`(voting-only node), `w`(warm node), and `-`(coordinating node only). */ 'node.role'?: string + /** The roles of the node. + * Returned values include `c`(cold node), `d`(data node), `f`(frozen node), `h`(hot node), `i`(ingest node), `l`(machine learning node), `m` (master eligible node), `r`(remote cluster client node), `s`(content node), `t`(transform node), `v`(voting-only node), `w`(warm node), and `-`(coordinating node only). + * @alias 'node.role' */ r?: string + /** The roles of the node. + * Returned values include `c`(cold node), `d`(data node), `f`(frozen node), `h`(hot node), `i`(ingest node), `l`(machine learning node), `m` (master eligible node), `r`(remote cluster client node), `s`(content node), `t`(transform node), `v`(voting-only node), `w`(warm node), and `-`(coordinating node only). + * @alias 'node.role' */ role?: string + /** The roles of the node. + * Returned values include `c`(cold node), `d`(data node), `f`(frozen node), `h`(hot node), `i`(ingest node), `l`(machine learning node), `m` (master eligible node), `r`(remote cluster client node), `s`(content node), `t`(transform node), `v`(voting-only node), `w`(warm node), and `-`(coordinating node only). + * @alias 'node.role' */ nodeRole?: string + /** Indicates whether the node is the elected master node. + * Returned values include `*`(elected master) and `-`(not elected master). */ master?: string + /** Indicates whether the node is the elected master node. + * Returned values include `*`(elected master) and `-`(not elected master). + * @alias master */ m?: string + /** The node name. */ name?: Name + /** The node name. + * @alias name */ n?: Name + /** The size of completion. */ 'completion.size'?: string + /** The size of completion. + * @alias 'completion.size' */ cs?: string + /** The size of completion. + * @alias 'completion.size' */ completionSize?: string + /** The used fielddata cache. */ 'fielddata.memory_size'?: string + /** The used fielddata cache. + * @alias 'fielddata.memory_size' */ fm?: string + /** The used fielddata cache. + * @alias 'fielddata.memory_size' */ fielddataMemory?: string + /** The fielddata evictions. */ 'fielddata.evictions'?: string + /** The fielddata evictions. + * @alias 'fielddata.evictions' */ fe?: string + /** The fielddata evictions. + * @alias 'fielddata.evictions' */ fielddataEvictions?: string + /** The used query cache. */ 'query_cache.memory_size'?: string + /** The used query cache. + * @alias 'query_cache.memory_size' */ qcm?: string + /** The used query cache. + * @alias 'query_cache.memory_size' */ queryCacheMemory?: string + /** The query cache evictions. */ 'query_cache.evictions'?: string + /** The query cache evictions. + * @alias 'query_cache.evictions' */ qce?: string + /** The query cache evictions. + * @alias 'query_cache.evictions' */ queryCacheEvictions?: string + /** The query cache hit counts. */ 'query_cache.hit_count'?: string + /** The query cache hit counts. + * @alias 'query_cache.hit_count' */ qchc?: string + /** The query cache hit counts. + * @alias 'query_cache.hit_count' */ queryCacheHitCount?: string + /** The query cache miss counts. */ 'query_cache.miss_count'?: string + /** The query cache miss counts.
+ * @alias 'query_cache.miss_count' */ qcmc?: string + /** The query cache miss counts. + * @alias 'query_cache.miss_count' */ queryCacheMissCount?: string + /** The used request cache. */ 'request_cache.memory_size'?: string + /** The used request cache. + * @alias 'request_cache.memory_size' */ rcm?: string + /** The used request cache. + * @alias 'request_cache.memory_size' */ requestCacheMemory?: string + /** The request cache evictions. */ 'request_cache.evictions'?: string + /** The request cache evictions. + * @alias 'request_cache.evictions' */ rce?: string + /** The request cache evictions. + * @alias 'request_cache.evictions' */ requestCacheEvictions?: string + /** The request cache hit counts. */ 'request_cache.hit_count'?: string + /** The request cache hit counts. + * @alias 'request_cache.hit_count' */ rchc?: string + /** The request cache hit counts. + * @alias 'request_cache.hit_count' */ requestCacheHitCount?: string + /** The request cache miss counts. */ 'request_cache.miss_count'?: string + /** The request cache miss counts. + * @alias 'request_cache.miss_count' */ rcmc?: string + /** The request cache miss counts. + * @alias 'request_cache.miss_count' */ requestCacheMissCount?: string + /** The number of flushes. */ 'flush.total'?: string + /** The number of flushes. + * @alias 'flush.total' */ ft?: string + /** The number of flushes. + * @alias 'flush.total' */ flushTotal?: string + /** The time spent in flush. */ 'flush.total_time'?: string + /** The time spent in flush. + * @alias 'flush.total_time' */ ftt?: string + /** The time spent in flush. + * @alias 'flush.total_time' */ flushTotalTime?: string + /** The number of current get ops. */ 'get.current'?: string + /** The number of current get ops. + * @alias 'get.current' */ gc?: string + /** The number of current get ops. + * @alias 'get.current' */ getCurrent?: string + /** The time spent in get. */ 'get.time'?: string + /** The time spent in get. + * @alias 'get.time' */ gti?: string + /** The time spent in get. + * @alias 'get.time' */ getTime?: string + /** The number of get ops. */ 'get.total'?: string + /** The number of get ops. + * @alias 'get.total' */ gto?: string + /** The number of get ops. + * @alias 'get.total' */ getTotal?: string + /** The time spent in successful gets. */ 'get.exists_time'?: string + /** The time spent in successful gets. + * @alias 'get.exists_time' */ geti?: string + /** The time spent in successful gets. + * @alias 'get.exists_time' */ getExistsTime?: string + /** The number of successful get operations. */ 'get.exists_total'?: string + /** The number of successful get operations. + * @alias 'get.exists_total' */ geto?: string + /** The number of successful get operations. + * @alias 'get.exists_total' */ getExistsTotal?: string + /** The time spent in failed gets. */ 'get.missing_time'?: string + /** The time spent in failed gets. + * @alias 'get.missing_time' */ gmti?: string + /** The time spent in failed gets. + * @alias 'get.missing_time' */ getMissingTime?: string + /** The number of failed gets. */ 'get.missing_total'?: string + /** The number of failed gets. + * @alias 'get.missing_total' */ gmto?: string + /** The number of failed gets. + * @alias 'get.missing_total' */ getMissingTotal?: string + /** The number of current deletions. */ 'indexing.delete_current'?: string + /** The number of current deletions. + * @alias 'indexing.delete_current' */ idc?: string + /** The number of current deletions. 
+ * @alias 'indexing.delete_current' */ indexingDeleteCurrent?: string + /** The time spent in deletions. */ 'indexing.delete_time'?: string + /** The time spent in deletions. + * @alias 'indexing.delete_time' */ idti?: string + /** The time spent in deletions. + * @alias 'indexing.delete_time' */ indexingDeleteTime?: string + /** The number of delete operations. */ 'indexing.delete_total'?: string + /** The number of delete operations. + * @alias 'indexing.delete_total' */ idto?: string + /** The number of delete operations. + * @alias 'indexing.delete_total' */ indexingDeleteTotal?: string + /** The number of current indexing operations. */ 'indexing.index_current'?: string + /** The number of current indexing operations. + * @alias 'indexing.index_current' */ iic?: string + /** The number of current indexing operations. + * @alias 'indexing.index_current' */ indexingIndexCurrent?: string + /** The time spent in indexing. */ 'indexing.index_time'?: string + /** The time spent in indexing. + * @alias 'indexing.index_time' */ iiti?: string + /** The time spent in indexing. + * @alias 'indexing.index_time' */ indexingIndexTime?: string + /** The number of indexing operations. */ 'indexing.index_total'?: string + /** The number of indexing operations. + * @alias 'indexing.index_total' */ iito?: string + /** The number of indexing operations. + * @alias 'indexing.index_total' */ indexingIndexTotal?: string + /** The number of failed indexing operations. */ 'indexing.index_failed'?: string + /** The number of failed indexing operations. + * @alias 'indexing.index_failed' */ iif?: string + /** The number of failed indexing operations. + * @alias 'indexing.index_failed' */ indexingIndexFailed?: string + /** The number of current merges. */ 'merges.current'?: string + /** The number of current merges. + * @alias 'merges.current' */ mc?: string + /** The number of current merges. + * @alias 'merges.current' */ mergesCurrent?: string + /** The number of current merging docs. */ 'merges.current_docs'?: string + /** The number of current merging docs. + * @alias 'merges.current_docs' */ mcd?: string + /** The number of current merging docs. + * @alias 'merges.current_docs' */ mergesCurrentDocs?: string + /** The size of current merges. */ 'merges.current_size'?: string + /** The size of current merges. + * @alias 'merges.current_size' */ mcs?: string + /** The size of current merges. + * @alias 'merges.current_size' */ mergesCurrentSize?: string + /** The number of completed merge operations. */ 'merges.total'?: string + /** The number of completed merge operations. + * @alias 'merges.total' */ mt?: string + /** The number of completed merge operations. + * @alias 'merges.total' */ mergesTotal?: string + /** The docs merged. */ 'merges.total_docs'?: string + /** The docs merged. + * @alias 'merges.total_docs' */ mtd?: string + /** The docs merged. + * @alias 'merges.total_docs' */ mergesTotalDocs?: string + /** The size merged. */ 'merges.total_size'?: string + /** The size merged. + * @alias 'merges.total_size' */ mts?: string + /** The size merged. + * @alias 'merges.total_size' */ mergesTotalSize?: string + /** The time spent in merges. */ 'merges.total_time'?: string + /** The time spent in merges. + * @alias 'merges.total_time' */ mtt?: string + /** The time spent in merges. + * @alias 'merges.total_time' */ mergesTotalTime?: string + /** The total refreshes. */ 'refresh.total'?: string + /** The time spent in refreshes. */ 'refresh.time'?: string + /** The total external refreshes. 
*/ 'refresh.external_total'?: string + /** The total external refreshes. + * @alias 'refresh.external_total' */ rto?: string + /** The total external refreshes. + * @alias 'refresh.external_total' */ refreshTotal?: string + /** The time spent in external refreshes. */ 'refresh.external_time'?: string + /** The time spent in external refreshes. + * @alias 'refresh.external_time' */ rti?: string + /** The time spent in external refreshes. + * @alias 'refresh.external_time' */ refreshTime?: string + /** The number of pending refresh listeners. */ 'refresh.listeners'?: string + /** The number of pending refresh listeners. + * @alias 'refresh.listeners' */ rli?: string + /** The number of pending refresh listeners. + * @alias 'refresh.listeners' */ refreshListeners?: string + /** The total script compilations. */ 'script.compilations'?: string + /** The total script compilations. + * @alias 'script.compilations' */ scrcc?: string + /** The total script compilations. + * @alias 'script.compilations' */ scriptCompilations?: string + /** The total compiled scripts evicted from the cache. */ 'script.cache_evictions'?: string + /** The total compiled scripts evicted from the cache. + * @alias 'script.cache_evictions' */ scrce?: string + /** The total compiled scripts evicted from the cache. + * @alias 'script.cache_evictions' */ scriptCacheEvictions?: string + /** The script cache compilation limit triggered. */ 'script.compilation_limit_triggered'?: string + /** The script cache compilation limit triggered. + * @alias 'script.compilation_limit_triggered' */ scrclt?: string + /** The script cache compilation limit triggered. + * @alias 'script.compilation_limit_triggered' */ scriptCacheCompilationLimitTriggered?: string + /** The current fetch phase operations. */ 'search.fetch_current'?: string + /** The current fetch phase operations. + * @alias 'search.fetch_current' */ sfc?: string + /** The current fetch phase operations. + * @alias 'search.fetch_current' */ searchFetchCurrent?: string + /** The time spent in fetch phase. */ 'search.fetch_time'?: string + /** The time spent in fetch phase. + * @alias 'search.fetch_time' */ sfti?: string + /** The time spent in fetch phase. + * @alias 'search.fetch_time' */ searchFetchTime?: string + /** The total fetch operations. */ 'search.fetch_total'?: string + /** The total fetch operations. + * @alias 'search.fetch_total' */ sfto?: string + /** The total fetch operations. + * @alias 'search.fetch_total' */ searchFetchTotal?: string + /** The open search contexts. */ 'search.open_contexts'?: string + /** The open search contexts. + * @alias 'search.open_contexts' */ so?: string + /** The open search contexts. + * @alias 'search.open_contexts' */ searchOpenContexts?: string + /** The current query phase operations. */ 'search.query_current'?: string + /** The current query phase operations. + * @alias 'search.query_current' */ sqc?: string + /** The current query phase operations. + * @alias 'search.query_current' */ searchQueryCurrent?: string + /** The time spent in query phase. */ 'search.query_time'?: string + /** The time spent in query phase. + * @alias 'search.query_time' */ sqti?: string + /** The time spent in query phase. + * @alias 'search.query_time' */ searchQueryTime?: string + /** The total query phase operations. */ 'search.query_total'?: string + /** The total query phase operations. + * @alias 'search.query_total' */ sqto?: string + /** The total query phase operations. 
+ * @alias 'search.query_total' */ searchQueryTotal?: string + /** The open scroll contexts. */ 'search.scroll_current'?: string + /** The open scroll contexts. + * @alias 'search.scroll_current' */ scc?: string + /** The open scroll contexts. + * @alias 'search.scroll_current' */ searchScrollCurrent?: string + /** The time scroll contexts held open. */ 'search.scroll_time'?: string + /** The time scroll contexts held open. + * @alias 'search.scroll_time' */ scti?: string + /** The time scroll contexts held open. + * @alias 'search.scroll_time' */ searchScrollTime?: string + /** The completed scroll contexts. */ 'search.scroll_total'?: string + /** The completed scroll contexts. + * @alias 'search.scroll_total' */ scto?: string + /** The completed scroll contexts. + * @alias 'search.scroll_total' */ searchScrollTotal?: string + /** The number of segments. */ 'segments.count'?: string + /** The number of segments. + * @alias 'segments.count' */ sc?: string + /** The number of segments. + * @alias 'segments.count' */ segmentsCount?: string + /** The memory used by segments. */ 'segments.memory'?: string + /** The memory used by segments. + * @alias 'segments.memory' */ sm?: string + /** The memory used by segments. + * @alias 'segments.memory' */ segmentsMemory?: string + /** The memory used by the index writer. */ 'segments.index_writer_memory'?: string + /** The memory used by the index writer. + * @alias 'segments.index_writer_memory' */ siwm?: string + /** The memory used by the index writer. + * @alias 'segments.index_writer_memory' */ segmentsIndexWriterMemory?: string + /** The memory used by the version map. */ 'segments.version_map_memory'?: string + /** The memory used by the version map. + * @alias 'segments.version_map_memory' */ svmm?: string + /** The memory used by the version map. + * @alias 'segments.version_map_memory' */ segmentsVersionMapMemory?: string + /** The memory used by fixed bit sets for nested object field types and type filters for types referred in _parent fields. */ 'segments.fixed_bitset_memory'?: string + /** The memory used by fixed bit sets for nested object field types and type filters for types referred in _parent fields. + * @alias 'segments.fixed_bitset_memory' */ sfbm?: string + /** The memory used by fixed bit sets for nested object field types and type filters for types referred in _parent fields. + * @alias 'segments.fixed_bitset_memory' */ fixedBitsetMemory?: string + /** The number of current suggest operations. */ 'suggest.current'?: string + /** The number of current suggest operations. + * @alias 'suggest.current' */ suc?: string + /** The number of current suggest operations. + * @alias 'suggest.current' */ suggestCurrent?: string + /** The time spent in suggest. */ 'suggest.time'?: string + /** The time spent in suggest. + * @alias 'suggest.time' */ suti?: string + /** The time spent in suggest. + * @alias 'suggest.time' */ suggestTime?: string + /** The number of suggest operations. */ 'suggest.total'?: string + /** The number of suggest operations. + * @alias 'suggest.total' */ suto?: string + /** The number of suggest operations. + * @alias 'suggest.total' */ suggestTotal?: string + /** The number of bulk shard operations. */ 'bulk.total_operations'?: string + /** The number of bulk shard operations. + * @alias 'bulk.total_operations' */ bto?: string + /** The number of bulk shard operations. + * @alias 'bulk.total_operations' */ bulkTotalOperations?: string + /** The time spent in shard bulk.
*/ 'bulk.total_time'?: string + /** The time spent in shard bulk. + * @alias 'bulk.total_time' */ btti?: string + /** The time spent in shard bulk. + * @alias 'bulk.total_time' */ bulkTotalTime?: string + /** The total size in bytes of shard bulk. */ 'bulk.total_size_in_bytes'?: string + /** The total size in bytes of shard bulk. + * @alias 'bulk.total_size_in_bytes' */ btsi?: string + /** The total size in bytes of shard bulk. + * @alias 'bulk.total_size_in_bytes' */ bulkTotalSizeInBytes?: string + /** The average time spent in shard bulk. */ 'bulk.avg_time'?: string + /** The average time spent in shard bulk. + * @alias 'bulk.avg_time' */ bati?: string + /** The average time spent in shard bulk. + * @alias 'bulk.avg_time' */ bulkAvgTime?: string + /** The average size in bytes of shard bulk. */ 'bulk.avg_size_in_bytes'?: string + /** The average size in bytes of shard bulk. + * @alias 'bulk.avg_size_in_bytes' */ basi?: string + /** The average size in bytes of shard bulk. + * @alias 'bulk.avg_size_in_bytes' */ bulkAvgSizeInBytes?: string } export interface CatNodesRequest extends CatCatRequestBase { -/** The unit used to display byte values. */ + /** The unit used to display byte values. */ bytes?: Bytes /** If `true`, return the full node ID. If `false`, return the shortened node ID. */ full_id?: boolean | string @@ -8936,7 +12942,9 @@ export interface CatNodesRequest extends CatCatRequestBase { include_unloaded_segments?: boolean /** List of columns to appear in the response. Supports simple wildcards. */ h?: Names - /** List of columns that determine how the table should be sorted. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name. */ + /** List of columns that determine how the table should be sorted. + * Sorting defaults to ascending and can be changed by setting `:asc` + * or `:desc` as a suffix to the column name. */ s?: Names /** Period to wait for a connection to the master node. */ master_timeout?: Duration @@ -8951,22 +12959,39 @@ export interface CatPendingTasksRequest extends CatCatRequestBase { export type CatNodesResponse = CatNodesNodesRecord[] export interface CatPendingTasksPendingTasksRecord { + /** The task insertion order. */ insertOrder?: string + /** The task insertion order. + * @alias insertOrder */ o?: string + /** Indicates how long the task has been in queue. */ timeInQueue?: string + /** Indicates how long the task has been in queue. + * @alias timeInQueue */ t?: string + /** The task priority. */ priority?: string + /** The task priority. + * @alias priority */ p?: string + /** The task source. */ source?: string + /** The task source. + * @alias source */ s?: string } export interface CatPendingTasksRequest extends CatCatRequestBase { -/** List of columns to appear in the response. Supports simple wildcards. */ + /** List of columns to appear in the response. Supports simple wildcards. */ h?: Names - /** List of columns that determine how the table should be sorted. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name. */ + /** List of columns that determine how the table should be sorted. + * Sorting defaults to ascending and can be changed by setting `:asc` + * or `:desc` as a suffix to the column name. */ s?: Names - /** If `true`, the request computes the list of selected nodes from the local cluster state. If `false` the list of selected nodes are computed from the cluster state of the master node.
In both cases the coordinating node will send requests for further information to each selected node. */ + /** If `true`, the request computes the list of selected nodes from the + * local cluster state. If `false` the list of selected nodes are computed + * from the cluster state of the master node. In both cases the coordinating + * node will send requests for further information to each selected node. */ local?: boolean /** Period to wait for a connection to the master node. */ master_timeout?: Duration @@ -8981,27 +13006,48 @@ export interface CatPendingTasksRequest extends CatCatRequestBase { export type CatPendingTasksResponse = CatPendingTasksPendingTasksRecord[] export interface CatPluginsPluginsRecord { + /** The unique node identifier. */ id?: NodeId + /** The node name. */ name?: Name + /** The node name. + * @alias name */ n?: Name + /** The component name. */ component?: string + /** The component name. + * @alias component */ c?: string + /** The component version. */ version?: VersionString + /** The component version. + * @alias version */ v?: VersionString + /** The plugin details. */ description?: string + /** The plugin details. + * @alias description */ d?: string + /** The plugin type. */ type?: string + /** The plugin type. + * @alias type */ t?: string } export interface CatPluginsRequest extends CatCatRequestBase { -/** List of columns to appear in the response. Supports simple wildcards. */ + /** List of columns to appear in the response. Supports simple wildcards. */ h?: Names - /** List of columns that determine how the table should be sorted. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name. */ + /** List of columns that determine how the table should be sorted. + * Sorting defaults to ascending and can be changed by setting `:asc` + * or `:desc` as a suffix to the column name. */ s?: Names /** Include bootstrap plugins in the response */ include_bootstrap?: boolean - /** If `true`, the request computes the list of selected nodes from the local cluster state. If `false` the list of selected nodes are computed from the cluster state of the master node. In both cases the coordinating node will send requests for further information to each selected node. */ + /** If `true`, the request computes the list of selected nodes from the + * local cluster state. If `false` the list of selected nodes are computed + * from the cluster state of the master node. In both cases the coordinating + * node will send requests for further information to each selected node. */ local?: boolean /** Period to wait for a connection to the master node. */ master_timeout?: Duration @@ -9014,65 +13060,150 @@ export interface CatPluginsRequest extends CatCatRequestBase { export type CatPluginsResponse = CatPluginsPluginsRecord[] export interface CatRecoveryRecoveryRecord { + /** The index name. */ index?: IndexName + /** The index name. + * @alias index */ i?: IndexName + /** The index name. + * @alias index */ idx?: IndexName + /** The shard name. */ shard?: string + /** The shard name. + * @alias shard */ s?: string + /** The shard name. + * @alias shard */ sh?: string + /** The recovery start time. */ start_time?: DateTime + /** The recovery start time. + * @alias start_time */ start?: DateTime + /** The recovery start time in epoch milliseconds. */ start_time_millis?: EpochTime + /** The recovery start time in epoch milliseconds. + * @alias start_time_millis */ start_millis?: EpochTime + /** The recovery stop time. 
*/ stop_time?: DateTime + /** The recovery stop time. + * @alias stop_time */ stop?: DateTime + /** The recovery stop time in epoch milliseconds. */ stop_time_millis?: EpochTime + /** The recovery stop time in epoch milliseconds. + * @alias stop_time_millis */ stop_millis?: EpochTime + /** The recovery time. */ time?: Duration + /** The recovery time. + * @alias time */ t?: Duration + /** The recovery time. + * @alias time */ ti?: Duration + /** The recovery type. */ type?: string + /** The recovery type. + * @alias type */ ty?: string + /** The recovery stage. */ stage?: string + /** The recovery stage. + * @alias stage */ st?: string + /** The source host. */ source_host?: string + /** The source host. + * @alias source_host */ shost?: string + /** The source node name. */ source_node?: string + /** The source node name. + * @alias source_node */ snode?: string + /** The target host. */ target_host?: string + /** The target host. + * @alias target_host */ thost?: string + /** The target node name. */ target_node?: string + /** The target node name. + * @alias target_node */ tnode?: string + /** The repository name. */ repository?: string + /** The repository name. + * @alias repository */ rep?: string + /** The snapshot name. */ snapshot?: string + /** The snapshot name. + * @alias snapshot */ snap?: string + /** The number of files to recover. */ files?: string + /** The number of files to recover. + * @alias files */ f?: string + /** The files recovered. */ files_recovered?: string + /** The files recovered. + * @alias files_recovered */ fr?: string + /** The ratio of files recovered. */ files_percent?: Percentage + /** The ratio of files recovered. + * @alias files_percent */ fp?: Percentage + /** The total number of files. */ files_total?: string + /** The total number of files. + * @alias files_total */ tf?: string + /** The number of bytes to recover. */ bytes?: string + /** The number of bytes to recover. + * @alias bytes */ b?: string + /** The bytes recovered. */ bytes_recovered?: string + /** The bytes recovered. + * @alias bytes_recovered */ br?: string + /** The ratio of bytes recovered. */ bytes_percent?: Percentage + /** The ratio of bytes recovered. + * @alias bytes_percent */ bp?: Percentage + /** The total number of bytes. */ bytes_total?: string + /** The total number of bytes. + * @alias bytes_total */ tb?: string + /** The number of translog operations to recover. */ translog_ops?: string + /** The number of translog operations to recover. + * @alias translog_ops */ to?: string + /** The translog operations recovered. */ translog_ops_recovered?: string + /** The translog operations recovered. + * @alias translog_ops_recovered */ tor?: string + /** The ratio of translog operations recovered. */ translog_ops_percent?: Percentage + /** The ratio of translog operations recovered. + * @alias translog_ops_percent */ top?: Percentage } export interface CatRecoveryRequest extends CatCatRequestBase { -/** A comma-separated list of data streams, indices, and aliases used to limit the request. Supports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`. */ + /** A comma-separated list of data streams, indices, and aliases used to limit the request. + * Supports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`. */ index?: Indices /** If `true`, the response only includes ongoing shard recoveries. 
*/ active_only?: boolean @@ -9082,7 +13213,9 @@ export interface CatRecoveryRequest extends CatCatRequestBase { detailed?: boolean /** List of columns to appear in the response. Supports simple wildcards. */ h?: Names - /** List of columns that determine how the table should be sorted. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name. */ + /** List of columns that determine how the table should be sorted. + * Sorting defaults to ascending and can be changed by setting `:asc` + * or `:desc` as a suffix to the column name. */ s?: Names /** Unit used to display time values. */ time?: TimeUnit @@ -9095,18 +13228,29 @@ export interface CatRecoveryRequest extends CatCatRequestBase { export type CatRecoveryResponse = CatRecoveryRecoveryRecord[] export interface CatRepositoriesRepositoriesRecord { + /** The unique repository identifier. */ id?: string + /** The unique repository identifier. + * @alias id */ repoId?: string + /** The repository type. */ type?: string + /** The repository type. + * @alias type */ t?: string } export interface CatRepositoriesRequest extends CatCatRequestBase { -/** List of columns to appear in the response. Supports simple wildcards. */ + /** List of columns to appear in the response. Supports simple wildcards. */ h?: Names - /** List of columns that determine how the table should be sorted. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name. */ + /** List of columns that determine how the table should be sorted. + * Sorting defaults to ascending and can be changed by setting `:asc` + * or `:desc` as a suffix to the column name. */ s?: Names - /** If `true`, the request computes the list of selected nodes from the local cluster state. If `false` the list of selected nodes are computed from the cluster state of the master node. In both cases the coordinating node will send requests for further information to each selected node. */ + /** If `true`, the request computes the list of selected nodes from the + * local cluster state. If `false` the list of selected nodes are computed + * from the cluster state of the master node. In both cases the coordinating + * node will send requests for further information to each selected node. */ local?: boolean /** Period to wait for a connection to the master node. */ master_timeout?: Duration @@ -9119,15 +13263,22 @@ export interface CatRepositoriesRequest extends CatCatRequestBase { export type CatRepositoriesResponse = CatRepositoriesRepositoriesRecord[] export interface CatSegmentsRequest extends CatCatRequestBase { -/** A comma-separated list of data streams, indices, and aliases used to limit the request. Supports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`. */ + /** A comma-separated list of data streams, indices, and aliases used to limit the request. + * Supports wildcards (`*`). + * To target all data streams and indices, omit this parameter or use `*` or `_all`. */ index?: Indices /** The unit used to display byte values. */ bytes?: Bytes /** List of columns to appear in the response. Supports simple wildcards. */ h?: Names - /** List of columns that determine how the table should be sorted. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name. */ + /** List of columns that determine how the table should be sorted. 
+ * Sorting defaults to ascending and can be changed by setting `:asc` + * or `:desc` as a suffix to the column name. */ s?: Names - /** If `true`, the request computes the list of selected nodes from the local cluster state. If `false` the list of selected nodes are computed from the cluster state of the master node. In both cases the coordinating node will send requests for further information to each selected node. */ + /** If `true`, the request computes the list of selected nodes from the + * local cluster state. If `false` the list of selected nodes are computed + * from the cluster state of the master node. In both cases the coordinating + * node will send requests for further information to each selected node. */ local?: boolean /** Period to wait for a connection to the master node. */ master_timeout?: Duration @@ -9140,55 +13291,155 @@ export interface CatSegmentsRequest extends CatCatRequestBase { export type CatSegmentsResponse = CatSegmentsSegmentsRecord[] export interface CatSegmentsSegmentsRecord { + /** The index name. */ index?: IndexName + /** The index name. + * @alias index */ i?: IndexName + /** The index name. + * @alias index */ idx?: IndexName + /** The shard name. */ shard?: string + /** The shard name. + * @alias shard */ s?: string + /** The shard name. + * @alias shard */ sh?: string + /** The shard type: `primary` or `replica`. */ prirep?: string + /** The shard type: `primary` or `replica`. + * @alias prirep */ p?: string + /** The shard type: `primary` or `replica`. + * @alias prirep */ pr?: string + /** The shard type: `primary` or `replica`. + * @alias prirep */ primaryOrReplica?: string + /** The IP address of the node where it lives. */ ip?: string + /** The unique identifier of the node where it lives. */ id?: NodeId + /** The segment name, which is derived from the segment generation and used internally to create file names in the directory of the shard. */ segment?: string + /** The segment name, which is derived from the segment generation and used internally to create file names in the directory of the shard. + * @alias segment */ seg?: string + /** The segment generation number. + * Elasticsearch increments this generation number for each segment written then uses this number to derive the segment name. */ generation?: string + /** The segment generation number. + * Elasticsearch increments this generation number for each segment written then uses this number to derive the segment name. + * @alias generation */ g?: string + /** The segment generation number. + * Elasticsearch increments this generation number for each segment written then uses this number to derive the segment name. + * @alias generation */ gen?: string + /** The number of documents in the segment. + * This excludes deleted documents and counts any nested documents separately from their parents. + * It also excludes documents which were indexed recently and do not yet belong to a segment. */ 'docs.count'?: string + /** The number of documents in the segment. + * This excludes deleted documents and counts any nested documents separately from their parents. + * It also excludes documents which were indexed recently and do not yet belong to a segment. + * @alias 'docs.count' */ dc?: string + /** The number of documents in the segment. + * This excludes deleted documents and counts any nested documents separately from their parents. + * It also excludes documents which were indexed recently and do not yet belong to a segment. 
+ * @alias 'docs.count' */ docsCount?: string + /** The number of deleted documents in the segment, which might be higher or lower than the number of delete operations you have performed. + * This number excludes deletes that were performed recently and do not yet belong to a segment. + * Deleted documents are cleaned up by the automatic merge process if it makes sense to do so. + * Also, Elasticsearch creates extra deleted documents to internally track the recent history of operations on a shard. */ 'docs.deleted'?: string + /** The number of deleted documents in the segment, which might be higher or lower than the number of delete operations you have performed. + * This number excludes deletes that were performed recently and do not yet belong to a segment. + * Deleted documents are cleaned up by the automatic merge process if it makes sense to do so. + * Also, Elasticsearch creates extra deleted documents to internally track the recent history of operations on a shard. + * @alias 'docs.deleted' */ dd?: string + /** The number of deleted documents in the segment, which might be higher or lower than the number of delete operations you have performed. + * This number excludes deletes that were performed recently and do not yet belong to a segment. + * Deleted documents are cleaned up by the automatic merge process if it makes sense to do so. + * Also, Elasticsearch creates extra deleted documents to internally track the recent history of operations on a shard. + * @alias 'docs.deleted' */ docsDeleted?: string + /** The segment size in bytes. */ size?: ByteSize + /** The segment size in bytes. + * @alias size */ si?: ByteSize + /** The segment memory in bytes. + * A value of `-1` indicates Elasticsearch was unable to compute this number. */ 'size.memory'?: ByteSize + /** The segment memory in bytes. + * A value of `-1` indicates Elasticsearch was unable to compute this number. + * @alias 'size.memory' */ sm?: ByteSize + /** The segment memory in bytes. + * A value of `-1` indicates Elasticsearch was unable to compute this number. + * @alias 'size.memory' */ sizeMemory?: ByteSize + /** If `true`, the segment is synced to disk. + * Segments that are synced can survive a hard reboot. + * If `false`, the data from uncommitted segments is also stored in the transaction log so that Elasticsearch is able to replay changes on the next start. */ committed?: string + /** If `true`, the segment is synced to disk. + * Segments that are synced can survive a hard reboot. + * If `false`, the data from uncommitted segments is also stored in the transaction log so that Elasticsearch is able to replay changes on the next start. + * @alias committed */ ic?: string + /** If `true`, the segment is synced to disk. + * Segments that are synced can survive a hard reboot. + * If `false`, the data from uncommitted segments is also stored in the transaction log so that Elasticsearch is able to replay changes on the next start. + * @alias committed */ isCommitted?: string + /** If `true`, the segment is searchable. + * If `false`, the segment has most likely been written to disk but needs a refresh to be searchable. */ searchable?: string + /** If `true`, the segment is searchable. + * If `false`, the segment has most likely been written to disk but needs a refresh to be searchable. + * @alias searchable */ is?: string + /** If `true`, the segment is searchable. + * If `false`, the segment has most likely been written to disk but needs a refresh to be searchable. 
+ * @alias searchable */ isSearchable?: string + /** The version of Lucene used to write the segment. */ version?: VersionString + /** The version of Lucene used to write the segment. + * @alias version */ v?: VersionString + /** If `true`, the segment is stored in a compound file. + * This means Lucene merged all files from the segment in a single file to save file descriptors. */ compound?: string + /** If `true`, the segment is stored in a compound file. + * This means Lucene merged all files from the segment in a single file to save file descriptors. + * @alias compound */ ico?: string + /** If `true`, the segment is stored in a compound file. + * This means Lucene merged all files from the segment in a single file to save file descriptors. + * @alias compound */ isCompound?: string } export interface CatShardsRequest extends CatCatRequestBase { -/** A comma-separated list of data streams, indices, and aliases used to limit the request. Supports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`. */ + /** A comma-separated list of data streams, indices, and aliases used to limit the request. + * Supports wildcards (`*`). + * To target all data streams and indices, omit this parameter or use `*` or `_all`. */ index?: Indices /** The unit used to display byte values. */ bytes?: Bytes /** List of columns to appear in the response. Supports simple wildcards. */ h?: Names - /** List of columns that determine how the table should be sorted. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name. */ + /** List of columns that determine how the table should be sorted. + * Sorting defaults to ascending and can be changed by setting `:asc` + * or `:desc` as a suffix to the column name. */ s?: Names /** Period to wait for a connection to the master node. */ master_timeout?: Duration @@ -9203,228 +13454,630 @@ export interface CatShardsRequest extends CatCatRequestBase { export type CatShardsResponse = CatShardsShardsRecord[] export interface CatShardsShardsRecord { + /** The index name. */ index?: string + /** The index name. + * @alias index */ i?: string + /** The index name. + * @alias index */ idx?: string + /** The shard name. */ shard?: string + /** The shard name. + * @alias shard */ s?: string + /** The shard name. + * @alias shard */ sh?: string + /** The shard type: `primary` or `replica`. */ prirep?: string + /** The shard type: `primary` or `replica`. + * @alias prirep */ p?: string + /** The shard type: `primary` or `replica`. + * @alias prirep */ pr?: string + /** The shard type: `primary` or `replica`. + * @alias prirep */ primaryOrReplica?: string + /** The shard state. + * Returned values include: + * `INITIALIZING`: The shard is recovering from a peer shard or gateway. + * `RELOCATING`: The shard is relocating. + * `STARTED`: The shard has started. + * `UNASSIGNED`: The shard is not assigned to any node. */ state?: string + /** The shard state. + * Returned values include: + * `INITIALIZING`: The shard is recovering from a peer shard or gateway. + * `RELOCATING`: The shard is relocating. + * `STARTED`: The shard has started. + * `UNASSIGNED`: The shard is not assigned to any node. + * @alias state */ st?: string + /** The number of documents in the shard. */ docs?: string | null + /** The number of documents in the shard. + * @alias docs */ d?: string | null + /** The number of documents in the shard. + * @alias docs */ dc?: string | null + /** The disk space used by the shard. 
*/ store?: string | null + /** The disk space used by the shard. + * @alias store */ sto?: string | null + /** The total size of the dataset (including the cache for partially mounted indices). */ dataset?: string | null + /** The IP address of the node. */ ip?: string | null + /** The unique identifier for the node. */ id?: string + /** The name of the node. */ node?: string | null + /** The name of the node. + * @alias node */ n?: string | null + /** The sync identifier. */ sync_id?: string + /** The reason for the last change to the state of an unassigned shard. + * It does not explain why the shard is currently unassigned; use the cluster allocation explain API for that information. + * Returned values include: + * `ALLOCATION_FAILED`: Unassigned as a result of a failed allocation of the shard. + * `CLUSTER_RECOVERED`: Unassigned as a result of a full cluster recovery. + * `DANGLING_INDEX_IMPORTED`: Unassigned as a result of importing a dangling index. + * `EXISTING_INDEX_RESTORED`: Unassigned as a result of restoring into a closed index. + * `FORCED_EMPTY_PRIMARY`: The shard’s allocation was last modified by forcing an empty primary using the cluster reroute API. + * `INDEX_CLOSED`: Unassigned because the index was closed. + * `INDEX_CREATED`: Unassigned as a result of an API creation of an index. + * `INDEX_REOPENED`: Unassigned as a result of opening a closed index. + * `MANUAL_ALLOCATION`: The shard’s allocation was last modified by the cluster reroute API. + * `NEW_INDEX_RESTORED`: Unassigned as a result of restoring into a new index. + * `NODE_LEFT`: Unassigned as a result of the node hosting it leaving the cluster. + * `NODE_RESTARTING`: Similar to `NODE_LEFT`, except that the node was registered as restarting using the node shutdown API. + * `PRIMARY_FAILED`: The shard was initializing as a replica, but the primary shard failed before the initialization completed. + * `REALLOCATED_REPLICA`: A better replica location is identified and causes the existing replica allocation to be cancelled. + * `REINITIALIZED`: When a shard moves from started back to initializing. + * `REPLICA_ADDED`: Unassigned as a result of explicit addition of a replica. + * `REROUTE_CANCELLED`: Unassigned as a result of explicit cancel reroute command. */ 'unassigned.reason'?: string + /** The reason for the last change to the state of an unassigned shard. + * It does not explain why the shard is currently unassigned; use the cluster allocation explain API for that information. + * Returned values include: + * `ALLOCATION_FAILED`: Unassigned as a result of a failed allocation of the shard. + * `CLUSTER_RECOVERED`: Unassigned as a result of a full cluster recovery. + * `DANGLING_INDEX_IMPORTED`: Unassigned as a result of importing a dangling index. + * `EXISTING_INDEX_RESTORED`: Unassigned as a result of restoring into a closed index. + * `FORCED_EMPTY_PRIMARY`: The shard’s allocation was last modified by forcing an empty primary using the cluster reroute API. + * `INDEX_CLOSED`: Unassigned because the index was closed. + * `INDEX_CREATED`: Unassigned as a result of an API creation of an index. + * `INDEX_REOPENED`: Unassigned as a result of opening a closed index. + * `MANUAL_ALLOCATION`: The shard’s allocation was last modified by the cluster reroute API. + * `NEW_INDEX_RESTORED`: Unassigned as a result of restoring into a new index. + * `NODE_LEFT`: Unassigned as a result of the node hosting it leaving the cluster. 
+ * `NODE_RESTARTING`: Similar to `NODE_LEFT`, except that the node was registered as restarting using the node shutdown API. + * `PRIMARY_FAILED`: The shard was initializing as a replica, but the primary shard failed before the initialization completed. + * `REALLOCATED_REPLICA`: A better replica location is identified and causes the existing replica allocation to be cancelled. + * `REINITIALIZED`: When a shard moves from started back to initializing. + * `REPLICA_ADDED`: Unassigned as a result of explicit addition of a replica. + * `REROUTE_CANCELLED`: Unassigned as a result of explicit cancel reroute command. + * @alias 'unassigned.reason' */ ur?: string + /** The time at which the shard became unassigned in Coordinated Universal Time (UTC). */ 'unassigned.at'?: string + /** The time at which the shard became unassigned in Coordinated Universal Time (UTC). + * @alias 'unassigned.at' */ ua?: string + /** The time at which the shard was requested to be unassigned in Coordinated Universal Time (UTC). */ 'unassigned.for'?: string + /** The time at which the shard was requested to be unassigned in Coordinated Universal Time (UTC). + * @alias 'unassigned.for' */ uf?: string + /** Additional details as to why the shard became unassigned. + * It does not explain why the shard is not assigned; use the cluster allocation explain API for that information. */ 'unassigned.details'?: string + /** Additional details as to why the shard became unassigned. + * It does not explain why the shard is not assigned; use the cluster allocation explain API for that information. + * @alias 'unassigned.details' */ ud?: string + /** The type of recovery source. */ 'recoverysource.type'?: string + /** The type of recovery source. + * @alias 'recoverysource.type' */ rs?: string + /** The size of completion. */ 'completion.size'?: string + /** The size of completion. + * @alias 'completion.size' */ cs?: string + /** The size of completion. + * @alias 'completion.size' */ completionSize?: string + /** The used fielddata cache memory. */ 'fielddata.memory_size'?: string + /** The used fielddata cache memory. + * @alias 'fielddata.memory_size' */ fm?: string + /** The used fielddata cache memory. + * @alias 'fielddata.memory_size' */ fielddataMemory?: string + /** The fielddata cache evictions. */ 'fielddata.evictions'?: string + /** The fielddata cache evictions. + * @alias 'fielddata.evictions' */ fe?: string + /** The fielddata cache evictions. + * @alias 'fielddata.evictions' */ fielddataEvictions?: string + /** The used query cache memory. */ 'query_cache.memory_size'?: string + /** The used query cache memory. + * @alias 'query_cache.memory_size' */ qcm?: string + /** The used query cache memory. + * @alias 'query_cache.memory_size' */ queryCacheMemory?: string + /** The query cache evictions. */ 'query_cache.evictions'?: string + /** The query cache evictions. + * @alias 'query_cache.evictions' */ qce?: string + /** The query cache evictions. + * @alias 'query_cache.evictions' */ queryCacheEvictions?: string + /** The number of flushes. */ 'flush.total'?: string + /** The number of flushes. + * @alias 'flush.total' */ ft?: string + /** The number of flushes. + * @alias 'flush.total' */ flushTotal?: string + /** The time spent in flush. */ 'flush.total_time'?: string + /** The time spent in flush. + * @alias 'flush.total_time' */ ftt?: string + /** The time spent in flush. + * @alias 'flush.total_time' */ flushTotalTime?: string + /** The number of current get operations. 
*/ 'get.current'?: string + /** The number of current get operations. + * @alias 'get.current' */ gc?: string + /** The number of current get operations. + * @alias 'get.current' */ getCurrent?: string + /** The time spent in get operations. */ 'get.time'?: string + /** The time spent in get operations. + * @alias 'get.time' */ gti?: string + /** The time spent in get operations. + * @alias 'get.time' */ getTime?: string + /** The number of get operations. */ 'get.total'?: string + /** The number of get operations. + * @alias 'get.total' */ gto?: string + /** The number of get operations. + * @alias 'get.total' */ getTotal?: string + /** The time spent in successful get operations. */ 'get.exists_time'?: string + /** The time spent in successful get operations. + * @alias 'get.exists_time' */ geti?: string + /** The time spent in successful get operations. + * @alias 'get.exists_time' */ getExistsTime?: string + /** The number of successful get operations. */ 'get.exists_total'?: string + /** The number of successful get operations. + * @alias 'get.exists_total' */ geto?: string + /** The number of successful get operations. + * @alias 'get.exists_total' */ getExistsTotal?: string + /** The time spent in failed get operations. */ 'get.missing_time'?: string + /** The time spent in failed get operations. + * @alias 'get.missing_time' */ gmti?: string + /** The time spent in failed get operations. + * @alias 'get.missing_time' */ getMissingTime?: string + /** The number of failed get operations. */ 'get.missing_total'?: string + /** The number of failed get operations. + * @alias 'get.missing_total' */ gmto?: string + /** The number of failed get operations. + * @alias 'get.missing_total' */ getMissingTotal?: string + /** The number of current deletion operations. */ 'indexing.delete_current'?: string + /** The number of current deletion operations. + * @alias 'indexing.delete_current' */ idc?: string + /** The number of current deletion operations. + * @alias 'indexing.delete_current' */ indexingDeleteCurrent?: string + /** The time spent in deletion operations. */ 'indexing.delete_time'?: string + /** The time spent in deletion operations. + * @alias 'indexing.delete_time' */ idti?: string + /** The time spent in deletion operations. + * @alias 'indexing.delete_time' */ indexingDeleteTime?: string + /** The number of delete operations. */ 'indexing.delete_total'?: string + /** The number of delete operations. + * @alias 'indexing.delete_total' */ idto?: string + /** The number of delete operations. + * @alias 'indexing.delete_total' */ indexingDeleteTotal?: string + /** The number of current indexing operations. */ 'indexing.index_current'?: string + /** The number of current indexing operations. + * @alias 'indexing.index_current' */ iic?: string + /** The number of current indexing operations. + * @alias 'indexing.index_current' */ indexingIndexCurrent?: string + /** The time spent in indexing operations. */ 'indexing.index_time'?: string + /** The time spent in indexing operations. + * @alias 'indexing.index_time' */ iiti?: string + /** The time spent in indexing operations. + * @alias 'indexing.index_time' */ indexingIndexTime?: string + /** The number of indexing operations. */ 'indexing.index_total'?: string + /** The number of indexing operations. + * @alias 'indexing.index_total' */ iito?: string + /** The number of indexing operations. + * @alias 'indexing.index_total' */ indexingIndexTotal?: string + /** The number of failed indexing operations. 
*/ 'indexing.index_failed'?: string + /** The number of failed indexing operations. + * @alias 'indexing.index_failed' */ iif?: string + /** The number of failed indexing operations. + * @alias 'indexing.index_failed' */ indexingIndexFailed?: string + /** The number of current merge operations. */ 'merges.current'?: string + /** The number of current merge operations. + * @alias 'merges.current' */ mc?: string + /** The number of current merge operations. + * @alias 'merges.current' */ mergesCurrent?: string + /** The number of current merging documents. */ 'merges.current_docs'?: string + /** The number of current merging documents. + * @alias 'merges.current_docs' */ mcd?: string + /** The number of current merging documents. + * @alias 'merges.current_docs' */ mergesCurrentDocs?: string + /** The size of current merge operations. */ 'merges.current_size'?: string + /** The size of current merge operations. + * @alias 'merges.current_size' */ mcs?: string + /** The size of current merge operations. + * @alias 'merges.current_size' */ mergesCurrentSize?: string + /** The number of completed merge operations. */ 'merges.total'?: string + /** The number of completed merge operations. + * @alias 'merges.total' */ mt?: string + /** The number of completed merge operations. + * @alias 'merges.total' */ mergesTotal?: string + /** The number of merged documents. */ 'merges.total_docs'?: string + /** The number of merged documents. + * @alias 'merges.total_docs' */ mtd?: string + /** The number of merged documents. + * @alias 'merges.total_docs' */ mergesTotalDocs?: string + /** The size of current merges. */ 'merges.total_size'?: string + /** The size of current merges. + * @alias 'merges.total_size' */ mts?: string + /** The size of current merges. + * @alias 'merges.total_size' */ mergesTotalSize?: string + /** The time spent merging documents. */ 'merges.total_time'?: string + /** The time spent merging documents. + * @alias 'merges.total_time' */ mtt?: string + /** The time spent merging documents. + * @alias 'merges.total_time' */ mergesTotalTime?: string + /** The total number of refreshes. */ 'refresh.total'?: string + /** The time spent in refreshes. */ 'refresh.time'?: string + /** The total number of external refreshes. */ 'refresh.external_total'?: string + /** The total number of external refreshes. + * @alias 'refresh.external_total' */ rto?: string + /** The total number of external refreshes. + * @alias 'refresh.external_total' */ refreshTotal?: string + /** The time spent in external refreshes. */ 'refresh.external_time'?: string + /** The time spent in external refreshes. + * @alias 'refresh.external_time' */ rti?: string + /** The time spent in external refreshes. + * @alias 'refresh.external_time' */ refreshTime?: string + /** The number of pending refresh listeners. */ 'refresh.listeners'?: string + /** The number of pending refresh listeners. + * @alias 'refresh.listeners' */ rli?: string + /** The number of pending refresh listeners. + * @alias 'refresh.listeners' */ refreshListeners?: string + /** The current fetch phase operations. */ 'search.fetch_current'?: string + /** The current fetch phase operations. + * @alias 'search.fetch_current' */ sfc?: string + /** The current fetch phase operations. + * @alias 'search.fetch_current' */ searchFetchCurrent?: string + /** The time spent in fetch phase. */ 'search.fetch_time'?: string + /** The time spent in fetch phase. + * @alias 'search.fetch_time' */ sfti?: string + /** The time spent in fetch phase. 
+ * @alias 'search.fetch_time' */ searchFetchTime?: string + /** The total number of fetch operations. */ 'search.fetch_total'?: string + /** The total number of fetch operations. + * @alias 'search.fetch_total' */ sfto?: string + /** The total number of fetch operations. + * @alias 'search.fetch_total' */ searchFetchTotal?: string + /** The number of open search contexts. */ 'search.open_contexts'?: string + /** The number of open search contexts. + * @alias 'search.open_contexts' */ so?: string + /** The number of open search contexts. + * @alias 'search.open_contexts' */ searchOpenContexts?: string + /** The current query phase operations. */ 'search.query_current'?: string + /** The current query phase operations. + * @alias 'search.query_current' */ sqc?: string + /** The current query phase operations. + * @alias 'search.query_current' */ searchQueryCurrent?: string + /** The time spent in query phase. */ 'search.query_time'?: string + /** The time spent in query phase. + * @alias 'search.query_time' */ sqti?: string + /** The time spent in query phase. + * @alias 'search.query_time' */ searchQueryTime?: string + /** The total number of query phase operations. */ 'search.query_total'?: string + /** The total number of query phase operations. + * @alias 'search.query_total' */ sqto?: string + /** The total number of query phase operations. + * @alias 'search.query_total' */ searchQueryTotal?: string + /** The open scroll contexts. */ 'search.scroll_current'?: string + /** The open scroll contexts. + * @alias 'search.scroll_current' */ scc?: string + /** The open scroll contexts. + * @alias 'search.scroll_current' */ searchScrollCurrent?: string + /** The time scroll contexts were held open. */ 'search.scroll_time'?: string + /** The time scroll contexts were held open. + * @alias 'search.scroll_time' */ scti?: string + /** The time scroll contexts were held open. + * @alias 'search.scroll_time' */ searchScrollTime?: string + /** The number of completed scroll contexts. */ 'search.scroll_total'?: string + /** The number of completed scroll contexts. + * @alias 'search.scroll_total' */ scto?: string + /** The number of completed scroll contexts. + * @alias 'search.scroll_total' */ searchScrollTotal?: string + /** The number of segments. */ 'segments.count'?: string + /** The number of segments. + * @alias 'segments.count' */ sc?: string + /** The number of segments. + * @alias 'segments.count' */ segmentsCount?: string + /** The memory used by segments. */ 'segments.memory'?: string + /** The memory used by segments. + * @alias 'segments.memory' */ sm?: string + /** The memory used by segments. + * @alias 'segments.memory' */ segmentsMemory?: string + /** The memory used by the index writer. */ 'segments.index_writer_memory'?: string + /** The memory used by the index writer. + * @alias 'segments.index_writer_memory' */ siwm?: string + /** The memory used by the index writer. + * @alias 'segments.index_writer_memory' */ segmentsIndexWriterMemory?: string + /** The memory used by the version map. */ 'segments.version_map_memory'?: string + /** The memory used by the version map. + * @alias 'segments.version_map_memory' */ svmm?: string + /** The memory used by the version map. + * @alias 'segments.version_map_memory' */ segmentsVersionMapMemory?: string + /** The memory used by fixed bit sets for nested object field types and export type filters for types referred in `_parent` fields. 
*/ 'segments.fixed_bitset_memory'?: string + /** The memory used by fixed bit sets for nested object field types and export type filters for types referred in `_parent` fields. + * @alias 'segments.fixed_bitset_memory' */ sfbm?: string + /** The memory used by fixed bit sets for nested object field types and export type filters for types referred in `_parent` fields. + * @alias 'segments.fixed_bitset_memory' */ fixedBitsetMemory?: string + /** The maximum sequence number. */ 'seq_no.max'?: string + /** The maximum sequence number. + * @alias 'seq_no.max' */ sqm?: string + /** The maximum sequence number. + * @alias 'seq_no.max' */ maxSeqNo?: string + /** The local checkpoint. */ 'seq_no.local_checkpoint'?: string + /** The local checkpoint. + * @alias 'seq_no.local_checkpoint' */ sql?: string + /** The local checkpoint. + * @alias 'seq_no.local_checkpoint' */ localCheckpoint?: string + /** The global checkpoint. */ 'seq_no.global_checkpoint'?: string + /** The global checkpoint. + * @alias 'seq_no.global_checkpoint' */ sqg?: string + /** The global checkpoint. + * @alias 'seq_no.global_checkpoint' */ globalCheckpoint?: string + /** The number of current warmer operations. */ 'warmer.current'?: string + /** The number of current warmer operations. + * @alias 'warmer.current' */ wc?: string + /** The number of current warmer operations. + * @alias 'warmer.current' */ warmerCurrent?: string + /** The total number of warmer operations. */ 'warmer.total'?: string + /** The total number of warmer operations. + * @alias 'warmer.total' */ wto?: string + /** The total number of warmer operations. + * @alias 'warmer.total' */ warmerTotal?: string + /** The time spent in warmer operations. */ 'warmer.total_time'?: string + /** The time spent in warmer operations. + * @alias 'warmer.total_time' */ wtt?: string + /** The time spent in warmer operations. + * @alias 'warmer.total_time' */ warmerTotalTime?: string + /** The shard data path. */ 'path.data'?: string + /** The shard data path. + * @alias 'path.data' */ pd?: string + /** The shard data path. + * @alias 'path.data' */ dataPath?: string + /** The shard state path. */ 'path.state'?: string + /** The shard state path. + * @alias 'path.state' */ ps?: string + /** The shard state path. + * @alias 'path.state' */ statsPath?: string + /** The number of bulk shard operations. */ 'bulk.total_operations'?: string + /** The number of bulk shard operations. + * @alias 'bulk.total_operations' */ bto?: string + /** The number of bulk shard operations. + * @alias 'bulk.total_operations' */ bulkTotalOperations?: string + /** The time spent in shard bulk operations. */ 'bulk.total_time'?: string + /** The time spent in shard bulk operations. + * @alias 'bulk.total_time' */ btti?: string + /** The time spent in shard bulk operations. + * @alias 'bulk.total_time' */ bulkTotalTime?: string + /** The total size in bytes of shard bulk operations. */ 'bulk.total_size_in_bytes'?: string + /** The total size in bytes of shard bulk operations. + * @alias 'bulk.total_size_in_bytes' */ btsi?: string + /** The total size in bytes of shard bulk operations. + * @alias 'bulk.total_size_in_bytes' */ bulkTotalSizeInBytes?: string + /** The average time spent in shard bulk operations. */ 'bulk.avg_time'?: string + /** The average time spent in shard bulk operations. + * @alias 'bulk.avg_time' */ bati?: string + /** The average time spent in shard bulk operations. + * @alias 'bulk.avg_time' */ bulkAvgTime?: string + /** The average size in bytes of shard bulk operations. 
*/ 'bulk.avg_size_in_bytes'?: string + /** The average size in bytes of shard bulk operations. + * @alias 'bulk.avg_size_in_bytes' */ basi?: string + /** The average size in bytes of shard bulk operations. + * @alias 'bulk.avg_size_in_bytes' */ bulkAvgSizeInBytes?: string } export interface CatSnapshotsRequest extends CatCatRequestBase { -/** A comma-separated list of snapshot repositories used to limit the request. Accepts wildcard expressions. `_all` returns all repositories. If any repository fails during the request, Elasticsearch returns an error. */ + /** A comma-separated list of snapshot repositories used to limit the request. + * Accepts wildcard expressions. + * `_all` returns all repositories. + * If any repository fails during the request, Elasticsearch returns an error. */ repository?: Names /** If `true`, the response does not include information from unavailable snapshots. */ ignore_unavailable?: boolean /** List of columns to appear in the response. Supports simple wildcards. */ h?: Names - /** List of columns that determine how the table should be sorted. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name. */ + /** List of columns that determine how the table should be sorted. + * Sorting defaults to ascending and can be changed by setting `:asc` + * or `:desc` as a suffix to the column name. */ s?: Names /** Period to wait for a connection to the master node. */ master_timeout?: Duration @@ -9439,41 +14092,102 @@ export interface CatSnapshotsRequest extends CatCatRequestBase { export type CatSnapshotsResponse = CatSnapshotsSnapshotsRecord[] export interface CatSnapshotsSnapshotsRecord { + /** The unique identifier for the snapshot. */ id?: string + /** The unique identifier for the snapshot. + * @alias id */ snapshot?: string + /** The repository name. */ repository?: string + /** The repository name. + * @alias repository */ re?: string + /** The repository name. + * @alias repository */ repo?: string + /** The state of the snapshot process. + * Returned values include: + * `FAILED`: The snapshot process failed. + * `INCOMPATIBLE`: The snapshot process is incompatible with the current cluster version. + * `IN_PROGRESS`: The snapshot process started but has not completed. + * `PARTIAL`: The snapshot process completed with a partial success. + * `SUCCESS`: The snapshot process completed with a full success. */ status?: string + /** The state of the snapshot process. + * Returned values include: + * `FAILED`: The snapshot process failed. + * `INCOMPATIBLE`: The snapshot process is incompatible with the current cluster version. + * `IN_PROGRESS`: The snapshot process started but has not completed. + * `PARTIAL`: The snapshot process completed with a partial success. + * `SUCCESS`: The snapshot process completed with a full success. + * @alias status */ s?: string + /** The Unix epoch time (seconds since 1970-01-01 00:00:00) at which the snapshot process started. */ start_epoch?: SpecUtilsStringified<EpochTime<UnitSeconds>> + /** The Unix epoch time (seconds since 1970-01-01 00:00:00) at which the snapshot process started. + * @alias start_epoch */ ste?: SpecUtilsStringified<EpochTime<UnitSeconds>> + /** The Unix epoch time (seconds since 1970-01-01 00:00:00) at which the snapshot process started. + * @alias start_epoch */ startEpoch?: SpecUtilsStringified<EpochTime<UnitSeconds>> + /** The time (HH:MM:SS) at which the snapshot process started. */ start_time?: WatcherScheduleTimeOfDay + /** The time (HH:MM:SS) at which the snapshot process started. 
+ * @alias start_time */ sti?: WatcherScheduleTimeOfDay + /** The time (HH:MM:SS) at which the snapshot process started. + * @alias start_time */ startTime?: WatcherScheduleTimeOfDay + /** The Unix epoch time (seconds since 1970-01-01 00:00:00) at which the snapshot process ended. */ end_epoch?: SpecUtilsStringified<EpochTime<UnitSeconds>> + /** The Unix epoch time (seconds since 1970-01-01 00:00:00) at which the snapshot process ended. + * @alias end_epoch */ ete?: SpecUtilsStringified<EpochTime<UnitSeconds>> + /** The Unix epoch time (seconds since 1970-01-01 00:00:00) at which the snapshot process ended. + * @alias end_epoch */ endEpoch?: SpecUtilsStringified<EpochTime<UnitSeconds>> + /** The time (HH:MM:SS) at which the snapshot process ended. */ end_time?: TimeOfDay + /** The time (HH:MM:SS) at which the snapshot process ended. + * @alias end_time */ eti?: TimeOfDay + /** The time (HH:MM:SS) at which the snapshot process ended. + * @alias end_time */ endTime?: TimeOfDay + /** The time it took the snapshot process to complete, in time units. */ duration?: Duration + /** The time it took the snapshot process to complete, in time units. + * @alias duration */ dur?: Duration + /** The number of indices in the snapshot. */ indices?: string + /** The number of indices in the snapshot. + * @alias indices */ i?: string + /** The number of successful shards in the snapshot. */ successful_shards?: string + /** The number of successful shards in the snapshot. + * @alias successful_shards */ ss?: string + /** The number of failed shards in the snapshot. */ failed_shards?: string + /** The number of failed shards in the snapshot. + * @alias failed_shards */ fs?: string + /** The total number of shards in the snapshot. */ total_shards?: string + /** The total number of shards in the snapshot. + * @alias total_shards */ ts?: string + /** The reason for any snapshot failures. */ reason?: string + /** The reason for any snapshot failures. + * @alias reason */ r?: string } export interface CatTasksRequest extends CatCatRequestBase { -/** The task action names, which are used to limit the response. */ + /** The task action names, which are used to limit the response. */ actions?: string[] /** If `true`, the response includes detailed information about shard recoveries. */ detailed?: boolean @@ -9483,11 +14197,14 @@ export interface CatTasksRequest extends CatCatRequestBase { parent_task_id?: string /** List of columns to appear in the response. Supports simple wildcards. */ h?: Names - /** List of columns that determine how the table should be sorted. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name. */ + /** List of columns that determine how the table should be sorted. + * Sorting defaults to ascending and can be changed by setting `:asc` + * or `:desc` as a suffix to the column name. */ s?: Names /** Unit used to display time values. */ time?: TimeUnit - /** Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ + /** Period to wait for a response. + * If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration /** If `true`, the request blocks until the task has completed. */ wait_for_completion?: boolean @@ -9500,48 +14217,102 @@ export interface CatTasksRequest extends CatCatRequestBase { export type CatTasksResponse = CatTasksTasksRecord[] export interface CatTasksTasksRecord { + /** The identifier of the task with the node. */ id?: Id + /** The task action. 
*/ action?: string + /** The task action. + * @alias action */ ac?: string + /** The unique task identifier. */ task_id?: Id + /** The unique task identifier. + * @alias task_id */ ti?: Id + /** The parent task identifier. */ parent_task_id?: string + /** The parent task identifier. + * @alias parent_task_id */ pti?: string + /** The task type. */ type?: string + /** The task type. + * @alias type */ ty?: string + /** The start time in milliseconds. */ start_time?: string + /** The start time in milliseconds. + * @alias start_time */ start?: string + /** The start time in `HH:MM:SS` format. */ timestamp?: string + /** The start time in `HH:MM:SS` format. + * @alias timestamp */ ts?: string + /** The start time in `HH:MM:SS` format. + * @alias timestamp */ hms?: string + /** The start time in `HH:MM:SS` format. + * @alias timestamp */ hhmmss?: string + /** The running time in nanoseconds. */ running_time_ns?: string + /** The running time. */ running_time?: string + /** The running time. + * @alias running_time */ time?: string + /** The unique node identifier. */ node_id?: NodeId + /** The unique node identifier. + * @alias node_id */ ni?: NodeId + /** The IP address for the node. */ ip?: string + /** The IP address for the node. + * @alias ip */ i?: string + /** The bound transport port for the node. */ port?: string + /** The bound transport port for the node. + * @alias port */ po?: string + /** The node name. */ node?: string + /** The node name. + * @alias node */ n?: string + /** The Elasticsearch version. */ version?: VersionString + /** The Elasticsearch version. + * @alias version */ v?: VersionString + /** The X-Opaque-ID header. */ x_opaque_id?: string + /** The X-Opaque-ID header. + * @alias x_opaque_id */ x?: string + /** The task action description. */ description?: string + /** The task action description. + * @alias description */ desc?: string } export interface CatTemplatesRequest extends CatCatRequestBase { -/** The name of the template to return. Accepts wildcard expressions. If omitted, all templates are returned. */ + /** The name of the template to return. + * Accepts wildcard expressions. If omitted, all templates are returned. */ name?: Name /** List of columns to appear in the response. Supports simple wildcards. */ h?: Names - /** List of columns that determine how the table should be sorted. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name. */ + /** List of columns that determine how the table should be sorted. + * Sorting defaults to ascending and can be changed by setting `:asc` + * or `:desc` as a suffix to the column name. */ s?: Names - /** If `true`, the request computes the list of selected nodes from the local cluster state. If `false` the list of selected nodes are computed from the cluster state of the master node. In both cases the coordinating node will send requests for further information to each selected node. */ + /** If `true`, the request computes the list of selected nodes from the + * local cluster state. If `false` the list of selected nodes are computed + * from the cluster state of the master node. In both cases the coordinating + * node will send requests for further information to each selected node. */ local?: boolean /** Period to wait for a connection to the master node. 
*/ master_timeout?: Duration @@ -9554,29 +14325,52 @@ export interface CatTemplatesRequest extends CatCatRequestBase { export type CatTemplatesResponse = CatTemplatesTemplatesRecord[] export interface CatTemplatesTemplatesRecord { + /** The template name. */ name?: Name + /** The template name. + * @alias name */ n?: Name + /** The template index patterns. */ index_patterns?: string + /** The template index patterns. + * @alias index_patterns */ t?: string + /** The template application order or priority number. */ order?: string + /** The template application order or priority number. + * @alias order */ o?: string + /** The template application order or priority number. + * @alias order */ p?: string + /** The template version. */ version?: VersionString | null + /** The template version. + * @alias version */ v?: VersionString | null + /** The component templates that comprise the index template. */ composed_of?: string + /** The component templates that comprise the index template. + * @alias composed_of */ c?: string } export interface CatThreadPoolRequest extends CatCatRequestBase { -/** A comma-separated list of thread pool names used to limit the request. Accepts wildcard expressions. */ + /** A comma-separated list of thread pool names used to limit the request. + * Accepts wildcard expressions. */ thread_pool_patterns?: Names /** List of columns to appear in the response. Supports simple wildcards. */ h?: Names - /** List of columns that determine how the table should be sorted. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name. */ + /** List of columns that determine how the table should be sorted. + * Sorting defaults to ascending and can be changed by setting `:asc` + * or `:desc` as a suffix to the column name. */ s?: Names /** The unit used to display time values. */ time?: TimeUnit - /** If `true`, the request computes the list of selected nodes from the local cluster state. If `false` the list of selected nodes are computed from the cluster state of the master node. In both cases the coordinating node will send requests for further information to each selected node. */ + /** If `true`, the request computes the list of selected nodes from the + * local cluster state. If `false` the list of selected nodes are computed + * from the cluster state of the master node. In both cases the coordinating + * node will send requests for further information to each selected node. */ local?: boolean /** Period to wait for a connection to the master node. */ master_timeout?: Duration @@ -9589,52 +14383,117 @@ export interface CatThreadPoolRequest extends CatCatRequestBase { export type CatThreadPoolResponse = CatThreadPoolThreadPoolRecord[] export interface CatThreadPoolThreadPoolRecord { + /** The node name. */ node_name?: string + /** The node name. + * @alias node_name */ nn?: string + /** The persistent node identifier. */ node_id?: NodeId + /** The persistent node identifier. + * @alias node_id */ id?: NodeId + /** The ephemeral node identifier. */ ephemeral_node_id?: string + /** The ephemeral node identifier. + * @alias ephemeral_node_id */ eid?: string + /** The process identifier. */ pid?: string + /** The process identifier. + * @alias pid */ p?: string + /** The host name for the current node. */ host?: string + /** The host name for the current node. + * @alias host */ h?: string + /** The IP address for the current node. */ ip?: string + /** The IP address for the current node. 
+ * @alias ip */ i?: string + /** The bound transport port for the current node. */ port?: string + /** The bound transport port for the current node. + * @alias port */ po?: string + /** The thread pool name. */ name?: string + /** The thread pool name. + * @alias name */ n?: string + /** The thread pool type. + * Returned values include `fixed`, `fixed_auto_queue_size`, `direct`, and `scaling`. */ type?: string + /** The thread pool type. + * Returned values include `fixed`, `fixed_auto_queue_size`, `direct`, and `scaling`. + * @alias type */ t?: string + /** The number of active threads in the current thread pool. */ active?: string + /** The number of active threads in the current thread pool. + * @alias active */ a?: string + /** The number of threads in the current thread pool. */ pool_size?: string + /** The number of threads in the current thread pool. + * @alias pool_size */ psz?: string + /** The number of tasks currently in queue. */ queue?: string + /** The number of tasks currently in queue. + * @alias queue */ q?: string + /** The maximum number of tasks permitted in the queue. */ queue_size?: string + /** The maximum number of tasks permitted in the queue. + * @alias queue_size */ qs?: string + /** The number of rejected tasks. */ rejected?: string + /** The number of rejected tasks. + * @alias rejected */ r?: string + /** The highest number of active threads in the current thread pool. */ largest?: string + /** The highest number of active threads in the current thread pool. + * @alias largest */ l?: string + /** The number of completed tasks. */ completed?: string + /** The number of completed tasks. + * @alias completed */ c?: string + /** The core number of active threads allowed in a scaling thread pool. */ core?: string | null + /** The core number of active threads allowed in a scaling thread pool. + * @alias core */ cr?: string | null + /** The maximum number of active threads allowed in a scaling thread pool. */ max?: string | null + /** The maximum number of active threads allowed in a scaling thread pool. + * @alias max */ mx?: string | null + /** The number of active threads allowed in a fixed thread pool. */ size?: string | null + /** The number of active threads allowed in a fixed thread pool. + * @alias size */ sz?: string | null + /** The thread keep alive time. */ keep_alive?: string | null + /** The thread keep alive time. + * @alias keep_alive */ ka?: string | null } export interface CatTransformsRequest extends CatCatRequestBase { -/** A transform identifier or a wildcard expression. If you do not specify one of these options, the API returns information for all transforms. */ + /** A transform identifier or a wildcard expression. + * If you do not specify one of these options, the API returns information for all transforms. */ transform_id?: Id - /** Specifies what to do when the request: contains wildcard expressions and there are no transforms that match; contains the `_all` string or no identifiers and there are no matches; contains wildcard expressions and there are only partial matches. If `true`, it returns an empty transforms array when there are no matches and the subset of results when there are partial matches. If `false`, the request returns a 404 status code when there are no matches or only partial matches. 
*/ + /** Specifies what to do when the request: contains wildcard expressions and there are no transforms that match; contains the `_all` string or no identifiers and there are no matches; contains wildcard expressions and there are only partial matches. + * If `true`, it returns an empty transforms array when there are no matches and the subset of results when there are partial matches. + * If `false`, the request returns a 404 status code when there are no matches or only partial matches. */ allow_no_match?: boolean /** Skips the specified number of transforms. */ from?: integer @@ -9655,131 +14514,301 @@ export interface CatTransformsRequest extends CatCatRequestBase { export type CatTransformsResponse = CatTransformsTransformsRecord[] export interface CatTransformsTransformsRecord { + /** The transform identifier. */ id?: Id + /** The status of the transform. + * Returned values include: + * `aborting`: The transform is aborting. + * `failed`: The transform failed. For more information about the failure, check the `reason` field. + * `indexing`: The transform is actively processing data and creating new documents. + * `started`: The transform is running but not actively indexing data. + * `stopped`: The transform is stopped. + * `stopping`: The transform is stopping. */ state?: string + /** The status of the transform. + * Returned values include: + * `aborting`: The transform is aborting. + * `failed`: The transform failed. For more information about the failure, check the `reason` field. + * `indexing`: The transform is actively processing data and creating new documents. + * `started`: The transform is running but not actively indexing data. + * `stopped`: The transform is stopped. + * `stopping`: The transform is stopping. + * @alias state */ s?: string + /** The sequence number for the checkpoint. */ checkpoint?: string + /** The sequence number for the checkpoint. + * @alias checkpoint */ c?: string + /** The number of documents that have been processed from the source index of the transform. */ documents_processed?: string + /** The number of documents that have been processed from the source index of the transform. + * @alias documents_processed */ docp?: string + /** The number of documents that have been processed from the source index of the transform. + * @alias documents_processed */ documentsProcessed?: string + /** The progress of the next checkpoint that is currently in progress. */ checkpoint_progress?: string | null + /** The progress of the next checkpoint that is currently in progress. + * @alias checkpoint_progress */ cp?: string | null + /** The progress of the next checkpoint that is currently in progress. + * @alias checkpoint_progress */ checkpointProgress?: string | null + /** The timestamp of the last search in the source indices. + * This field is shown only if the transform is running. */ last_search_time?: string | null + /** The timestamp of the last search in the source indices. + * This field is shown only if the transform is running. + * @alias last_search_time */ lst?: string | null + /** The timestamp of the last search in the source indices. + * This field is shown only if the transform is running. + * @alias last_search_time */ lastSearchTime?: string | null + /** The timestamp when changes were last detected in the source indices. */ changes_last_detection_time?: string | null + /** The timestamp when changes were last detected in the source indices. 
+ * @alias changes_last_detection_time */ cldt?: string | null + /** The time the transform was created. */ create_time?: string + /** The time the transform was created. + * @alias create_time */ ct?: string + /** The time the transform was created. + * @alias create_time */ createTime?: string + /** The version of Elasticsearch that existed on the node when the transform was created. */ version?: VersionString + /** The version of Elasticsearch that existed on the node when the transform was created. + * @alias version */ v?: VersionString + /** The source indices for the transform. */ source_index?: string + /** The source indices for the transform. + * @alias source_index */ si?: string + /** The source indices for the transform. + * @alias source_index */ sourceIndex?: string + /** The destination index for the transform. */ dest_index?: string + /** The destination index for the transform. + * @alias dest_index */ di?: string + /** The destination index for the transform. + * @alias dest_index */ destIndex?: string + /** The unique identifier for the ingest pipeline. */ pipeline?: string + /** The unique identifier for the ingest pipeline. + * @alias pipeline */ p?: string + /** The description of the transform. */ description?: string + /** The description of the transform. + * @alias description */ d?: string + /** The type of transform: `batch` or `continuous`. */ transform_type?: string + /** The type of transform: `batch` or `continuous`. + * @alias transform_type */ tt?: string + /** The interval between checks for changes in the source indices when the transform is running continuously. */ frequency?: string + /** The interval between checks for changes in the source indices when the transform is running continuously. + * @alias frequency */ f?: string + /** The initial page size that is used for the composite aggregation for each checkpoint. */ max_page_search_size?: string + /** The initial page size that is used for the composite aggregation for each checkpoint. + * @alias max_page_search_size */ mpsz?: string + /** The number of input documents per second. */ docs_per_second?: string + /** The number of input documents per second. + * @alias docs_per_second */ dps?: string + /** If a transform has a `failed` state, these details describe the reason for failure. */ reason?: string + /** If a transform has a `failed` state, these details describe the reason for failure. + * @alias reason */ r?: string + /** The total number of search operations on the source index for the transform. */ search_total?: string + /** The total number of search operations on the source index for the transform. + * @alias search_total */ st?: string + /** The total number of search failures. */ search_failure?: string + /** The total number of search failures. + * @alias search_failure */ sf?: string + /** The total amount of search time, in milliseconds. */ search_time?: string + /** The total amount of search time, in milliseconds. + * @alias search_time */ stime?: string + /** The total number of index operations done by the transform. */ index_total?: string + /** The total number of index operations done by the transform. + * @alias index_total */ it?: string + /** The total number of indexing failures. */ index_failure?: string + /** The total number of indexing failures. + * @alias index_failure */ if?: string + /** The total time spent indexing documents, in milliseconds. */ index_time?: string + /** The total time spent indexing documents, in milliseconds. 
+ * @alias index_time */ itime?: string + /** The number of documents that have been indexed into the destination index for the transform. */ documents_indexed?: string + /** The number of documents that have been indexed into the destination index for the transform. + * @alias documents_indexed */ doci?: string + /** The total time spent deleting documents, in milliseconds. */ delete_time?: string + /** The total time spent deleting documents, in milliseconds. + * @alias delete_time */ dtime?: string + /** The number of documents deleted from the destination index due to the retention policy for the transform. */ documents_deleted?: string + /** The number of documents deleted from the destination index due to the retention policy for the transform. + * @alias documents_deleted */ docd?: string + /** The number of times the transform has been triggered by the scheduler. + * For example, the scheduler triggers the transform indexer to check for updates or ingest new data at an interval specified in the `frequency` property. */ trigger_count?: string + /** The number of times the transform has been triggered by the scheduler. + * For example, the scheduler triggers the transform indexer to check for updates or ingest new data at an interval specified in the `frequency` property. + * @alias trigger_count */ tc?: string + /** The number of search or bulk index operations processed. + * Documents are processed in batches instead of individually. */ pages_processed?: string + /** The number of search or bulk index operations processed. + * Documents are processed in batches instead of individually. + * @alias pages_processed */ pp?: string + /** The total time spent processing results, in milliseconds. */ processing_time?: string + /** The total time spent processing results, in milliseconds. + * @alias processing_time */ pt?: string + /** The exponential moving average of the duration of the checkpoint, in milliseconds. */ checkpoint_duration_time_exp_avg?: string + /** The exponential moving average of the duration of the checkpoint, in milliseconds. + * @alias checkpoint_duration_time_exp_avg */ cdtea?: string + /** The exponential moving average of the duration of the checkpoint, in milliseconds. + * @alias checkpoint_duration_time_exp_avg */ checkpointTimeExpAvg?: string + /** The exponential moving average of the number of new documents that have been indexed. */ indexed_documents_exp_avg?: string + /** The exponential moving average of the number of new documents that have been indexed. + * @alias indexed_documents_exp_avg */ idea?: string + /** The exponential moving average of the number of documents that have been processed. */ processed_documents_exp_avg?: string + /** The exponential moving average of the number of documents that have been processed. + * @alias processed_documents_exp_avg */ pdea?: string } export interface CcrFollowIndexStats { + /** The name of the follower index. */ index: IndexName + /** An array of shard-level following task statistics. */ shards: CcrShardStats[] } export interface CcrReadException { + /** The exception that caused the read to fail. */ exception: ErrorCause + /** The starting sequence number of the batch requested from the leader. */ from_seq_no: SequenceNumber + /** The number of times the batch has been retried. */ retries: integer } export interface CcrShardStats { + /** The total of transferred bytes read from the leader. + * This is only an estimate and does not account for compression if enabled. 
*/ bytes_read: long + /** The number of failed reads. */ failed_read_requests: long + /** The number of failed bulk write requests on the follower. */ failed_write_requests: long fatal_exception?: ErrorCause + /** The index aliases version the follower is synced up to. */ follower_aliases_version: VersionNumber + /** The current global checkpoint on the follower. + * The difference between the `leader_global_checkpoint` and the `follower_global_checkpoint` is an indication of how much the follower is lagging the leader. */ follower_global_checkpoint: long + /** The name of the follower index. */ follower_index: string + /** The mapping version the follower is synced up to. */ follower_mapping_version: VersionNumber + /** The current maximum sequence number on the follower. */ follower_max_seq_no: SequenceNumber + /** The index settings version the follower is synced up to. */ follower_settings_version: VersionNumber + /** The starting sequence number of the last batch of operations requested from the leader. */ last_requested_seq_no: SequenceNumber + /** The current global checkpoint on the leader known to the follower task. */ leader_global_checkpoint: long + /** The name of the index in the leader cluster being followed. */ leader_index: string + /** The current maximum sequence number on the leader known to the follower task. */ leader_max_seq_no: SequenceNumber + /** The total number of operations read from the leader. */ operations_read: long + /** The number of operations written on the follower. */ operations_written: long + /** The number of active read requests from the follower. */ outstanding_read_requests: integer + /** The number of active bulk write requests on the follower. */ outstanding_write_requests: integer + /** An array of objects representing failed reads. */ read_exceptions: CcrReadException[] + /** The remote cluster containing the leader index. */ remote_cluster: string + /** The numerical shard ID, with values from 0 to one less than the number of replicas. */ shard_id: integer + /** The number of successful fetches. */ successful_read_requests: long + /** The number of bulk write requests run on the follower. */ successful_write_requests: long time_since_last_read?: Duration + /** The number of milliseconds since a read request was sent to the leader. + * When the follower is caught up to the leader, this number will increase up to the configured `read_poll_timeout` at which point another read request will be sent to the leader. */ time_since_last_read_millis: DurationValue total_read_remote_exec_time?: Duration + /** The total time reads spent running on the remote cluster. */ total_read_remote_exec_time_millis: DurationValue total_read_time?: Duration + /** The total time reads were outstanding, measured from the time a read was sent to the leader to the time a reply was returned to the follower. */ total_read_time_millis: DurationValue total_write_time?: Duration + /** The total time spent writing on the follower. */ total_write_time_millis: DurationValue + /** The number of write operations queued on the follower. */ write_buffer_operation_count: long + /** The total number of bytes of operations currently queued for writing. */ write_buffer_size_in_bytes: ByteSize } export interface CcrDeleteAutoFollowPatternRequest extends RequestBase { -/** The auto-follow pattern collection to delete. */ + /** The auto-follow pattern collection to delete. */ name: Name - /** The period to wait for a connection to the master node. 
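// Illustrative sketch, not part of the diff: reading the CcrShardStats documented above through the
// follower stats API, for example to estimate how far a follower lags its leader. The follower index
// name and the cluster URL are assumptions for the example.
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

client.ccr.followStats({ index: 'follower-logs' }).then(resp => {
  for (const index of resp.indices) {
    for (const shard of index.shards) {
      const lag = shard.leader_global_checkpoint - shard.follower_global_checkpoint
      console.log(`${index.index}[${shard.shard_id}] lag: ${lag} ops, failed reads: ${shard.failed_read_requests}`)
    }
  }
}).catch(console.error)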
If the master node is not available before the timeout expires, the request fails and returns an error. It can also be set to `-1` to indicate that the request should never timeout. */
+  /** The period to wait for a connection to the master node.
+   * If the master node is not available before the timeout expires, the request fails and returns an error.
+   * It can also be set to `-1` to indicate that the request should never timeout. */
   master_timeout?: Duration
   /** All values in `body` will be added to the request body. */
   body?: string | { [key: string]: any } & { name?: never, master_timeout?: never }
@@ -9790,11 +14819,14 @@ export interface CcrDeleteAutoFollowPatternRequest extends RequestBase {
 export type CcrDeleteAutoFollowPatternResponse = AcknowledgedResponseBase

 export interface CcrFollowRequest extends RequestBase {
-/** The name of the follower index. */
+  /** The name of the follower index. */
   index: IndexName
   /** Period to wait for a connection to the master node. */
   master_timeout?: Duration
-  /** Specifies the number of shards to wait on being active before responding. This defaults to waiting on none of the shards to be active. A shard must be restored from the leader index before being active. Restoring a follower shard requires transferring all the remote Lucene segment files to the follower index. */
+  /** Specifies the number of shards to wait on being active before responding. This defaults to waiting on none of the shards to be
+   * active.
+   * A shard must be restored from the leader index before being active. Restoring a follower shard requires transferring all the
+   * remote Lucene segment files to the follower index. */
   wait_for_active_shards?: WaitForActiveShards
   /** If the leader index is part of a data stream, the name to which the local data stream for the followed index should be renamed. */
   data_stream_name?: string
@@ -9808,17 +14840,22 @@ export interface CcrFollowRequest extends RequestBase {
   max_read_request_operation_count?: integer
   /** The maximum size in bytes of per read of a batch of operations pulled from the remote cluster. */
   max_read_request_size?: ByteSize
-  /** The maximum time to wait before retrying an operation that failed exceptionally. An exponential backoff strategy is employed when retrying. */
+  /** The maximum time to wait before retrying an operation that failed exceptionally. An exponential backoff strategy is employed when
+   * retrying. */
   max_retry_delay?: Duration
-  /** The maximum number of operations that can be queued for writing. When this limit is reached, reads from the remote cluster will be deferred until the number of queued operations goes below the limit. */
+  /** The maximum number of operations that can be queued for writing. When this limit is reached, reads from the remote cluster will be
+   * deferred until the number of queued operations goes below the limit. */
   max_write_buffer_count?: integer
-  /** The maximum total bytes of operations that can be queued for writing. When this limit is reached, reads from the remote cluster will be deferred until the total bytes of queued operations goes below the limit. */
+  /** The maximum total bytes of operations that can be queued for writing. When this limit is reached, reads from the remote cluster will
+   * be deferred until the total bytes of queued operations goes below the limit. */
   max_write_buffer_size?: ByteSize
   /** The maximum number of operations per bulk write request executed on the follower.
*/ max_write_request_operation_count?: integer /** The maximum total bytes of operations per bulk write request executed on the follower. */ max_write_request_size?: ByteSize - /** The maximum time to wait for new operations on the remote cluster when the follower index is synchronized with the leader index. When the timeout has elapsed, the poll for operations will return to the follower so that it can update some statistics. Then the follower will immediately attempt to read from the leader again. */ + /** The maximum time to wait for new operations on the remote cluster when the follower index is synchronized with the leader index. + * When the timeout has elapsed, the poll for operations will return to the follower so that it can update some statistics. + * Then the follower will immediately attempt to read from the leader again. */ read_poll_timeout?: Duration /** The remote cluster containing the leader index. */ remote_cluster: string @@ -9837,32 +14874,54 @@ export interface CcrFollowResponse { } export interface CcrFollowInfoFollowerIndex { + /** The name of the follower index. */ follower_index: IndexName + /** The name of the index in the leader cluster that is followed. */ leader_index: IndexName + /** An object that encapsulates cross-cluster replication parameters. If the follower index's status is paused, this object is omitted. */ parameters?: CcrFollowInfoFollowerIndexParameters + /** The remote cluster that contains the leader index. */ remote_cluster: Name + /** The status of the index following: `active` or `paused`. */ status: CcrFollowInfoFollowerIndexStatus } export interface CcrFollowInfoFollowerIndexParameters { + /** The maximum number of outstanding reads requests from the remote cluster. */ max_outstanding_read_requests?: long + /** The maximum number of outstanding write requests on the follower. */ max_outstanding_write_requests?: integer + /** The maximum number of operations to pull per read from the remote cluster. */ max_read_request_operation_count?: integer + /** The maximum size in bytes of per read of a batch of operations pulled from the remote cluster. */ max_read_request_size?: ByteSize + /** The maximum time to wait before retrying an operation that failed exceptionally. An exponential backoff strategy is employed when + * retrying. */ max_retry_delay?: Duration + /** The maximum number of operations that can be queued for writing. When this limit is reached, reads from the remote cluster will be + * deferred until the number of queued operations goes below the limit. */ max_write_buffer_count?: integer + /** The maximum total bytes of operations that can be queued for writing. When this limit is reached, reads from the remote cluster will + * be deferred until the total bytes of queued operations goes below the limit. */ max_write_buffer_size?: ByteSize + /** The maximum number of operations per bulk write request executed on the follower. */ max_write_request_operation_count?: integer + /** The maximum total bytes of operations per bulk write request executed on the follower. */ max_write_request_size?: ByteSize + /** The maximum time to wait for new operations on the remote cluster when the follower index is synchronized with the leader index. + * When the timeout has elapsed, the poll for operations will return to the follower so that it can update some statistics. + * Then the follower will immediately attempt to read from the leader again. 
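// Illustrative sketch, not part of the diff: how CcrFollowRequest maps onto a client.ccr.follow()
// call. `leader_index` belongs to the same request type but is not shown in this hunk, and the
// remote cluster alias 'leader' is assumed to be configured already.
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

client.ccr.follow({
  index: 'follower-logs',            // the follower index to create locally
  remote_cluster: 'leader',          // remote cluster containing the leader index
  leader_index: 'logs',              // assumed leader index name
  wait_for_active_shards: 1,
  max_read_request_operation_count: 5120,
  read_poll_timeout: '1m'
}).then(resp => console.log(resp)).catch(console.error)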
*/ read_poll_timeout?: Duration } export type CcrFollowInfoFollowerIndexStatus = 'active' | 'paused' export interface CcrFollowInfoRequest extends RequestBase { -/** A comma-delimited list of follower index patterns. */ + /** A comma-delimited list of follower index patterns. */ index: Indices - /** The period to wait for a connection to the master node. If the master node is not available before the timeout expires, the request fails and returns an error. It can also be set to `-1` to indicate that the request should never timeout. */ + /** The period to wait for a connection to the master node. + * If the master node is not available before the timeout expires, the request fails and returns an error. + * It can also be set to `-1` to indicate that the request should never timeout. */ master_timeout?: Duration /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { index?: never, master_timeout?: never } @@ -9875,9 +14934,10 @@ export interface CcrFollowInfoResponse { } export interface CcrFollowStatsRequest extends RequestBase { -/** A comma-delimited list of index patterns. */ + /** A comma-delimited list of index patterns. */ index: Indices - /** The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ + /** The period to wait for a response. + * If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { index?: never, timeout?: never } @@ -9886,11 +14946,12 @@ export interface CcrFollowStatsRequest extends RequestBase { } export interface CcrFollowStatsResponse { + /** An array of follower index statistics. */ indices: CcrFollowIndexStats[] } export interface CcrForgetFollowerRequest extends RequestBase { -/** the name of the leader index for which specified follower retention leases should be removed */ + /** the name of the leader index for which specified follower retention leases should be removed */ index: IndexName /** Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration @@ -9915,17 +14976,25 @@ export interface CcrGetAutoFollowPatternAutoFollowPattern { export interface CcrGetAutoFollowPatternAutoFollowPatternSummary { active: boolean + /** The remote cluster containing the leader indices to match against. */ remote_cluster: string + /** The name of follower index. */ follow_index_pattern?: IndexPattern + /** An array of simple index patterns to match against indices in the remote cluster specified by the remote_cluster field. */ leader_index_patterns: IndexPatterns + /** An array of simple index patterns that can be used to exclude indices from being auto-followed. */ leader_index_exclusion_patterns: IndexPatterns + /** The maximum number of outstanding reads requests from the remote cluster. */ max_outstanding_read_requests: integer } export interface CcrGetAutoFollowPatternRequest extends RequestBase { -/** The auto-follow pattern collection that you want to retrieve. If you do not specify a name, the API returns information for all collections. */ + /** The auto-follow pattern collection that you want to retrieve. + * If you do not specify a name, the API returns information for all collections. */ name?: Name - /** The period to wait for a connection to the master node. 
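// Illustrative sketch, not part of the diff: CcrFollowInfoRequest accepts a comma-delimited list of
// follower index patterns; the response field name (`follower_indices`) is an assumption here.
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

client.ccr.followInfo({ index: 'follower-*', master_timeout: '30s' }).then(resp => {
  for (const follower of resp.follower_indices) {
    console.log(follower.follower_index, follower.status, follower.remote_cluster)
  }
}).catch(console.error)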
If the master node is not available before the timeout expires, the request fails and returns an error. It can also be set to `-1` to indicate that the request should never timeout. */ + /** The period to wait for a connection to the master node. + * If the master node is not available before the timeout expires, the request fails and returns an error. + * It can also be set to `-1` to indicate that the request should never timeout. */ master_timeout?: Duration /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { name?: never, master_timeout?: never } @@ -9938,9 +15007,11 @@ export interface CcrGetAutoFollowPatternResponse { } export interface CcrPauseAutoFollowPatternRequest extends RequestBase { -/** The name of the auto-follow pattern to pause. */ + /** The name of the auto-follow pattern to pause. */ name: Name - /** The period to wait for a connection to the master node. If the master node is not available before the timeout expires, the request fails and returns an error. It can also be set to `-1` to indicate that the request should never timeout. */ + /** The period to wait for a connection to the master node. + * If the master node is not available before the timeout expires, the request fails and returns an error. + * It can also be set to `-1` to indicate that the request should never timeout. */ master_timeout?: Duration /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { name?: never, master_timeout?: never } @@ -9951,9 +15022,11 @@ export interface CcrPauseAutoFollowPatternRequest extends RequestBase { export type CcrPauseAutoFollowPatternResponse = AcknowledgedResponseBase export interface CcrPauseFollowRequest extends RequestBase { -/** The name of the follower index. */ + /** The name of the follower index. */ index: IndexName - /** The period to wait for a connection to the master node. If the master node is not available before the timeout expires, the request fails and returns an error. It can also be set to `-1` to indicate that the request should never timeout. */ + /** The period to wait for a connection to the master node. + * If the master node is not available before the timeout expires, the request fails and returns an error. + * It can also be set to `-1` to indicate that the request should never timeout. */ master_timeout?: Duration /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { index?: never, master_timeout?: never } @@ -9964,7 +15037,7 @@ export interface CcrPauseFollowRequest extends RequestBase { export type CcrPauseFollowResponse = AcknowledgedResponseBase export interface CcrPutAutoFollowPatternRequest extends RequestBase { -/** The name of the collection of auto-follow patterns. */ + /** The name of the collection of auto-follow patterns. */ name: Name /** Period to wait for a connection to the master node. */ master_timeout?: Duration @@ -10007,9 +15080,11 @@ export interface CcrPutAutoFollowPatternRequest extends RequestBase { export type CcrPutAutoFollowPatternResponse = AcknowledgedResponseBase export interface CcrResumeAutoFollowPatternRequest extends RequestBase { -/** The name of the auto-follow pattern to resume. */ + /** The name of the auto-follow pattern to resume. */ name: Name - /** The period to wait for a connection to the master node. If the master node is not available before the timeout expires, the request fails and returns an error. 
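// Illustrative sketch, not part of the diff: omitting `name` in CcrGetAutoFollowPatternRequest
// returns every auto-follow pattern collection; the `patterns` response field is an assumption here.
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

async function listPatterns() {
  const resp = await client.ccr.getAutoFollowPattern()
  for (const p of resp.patterns) {
    console.log(p.name, p.pattern.remote_cluster, p.pattern.leader_index_patterns)
  }
}

listPatterns().catch(console.error)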
It can also be set to `-1` to indicate that the request should never timeout. */ + /** The period to wait for a connection to the master node. + * If the master node is not available before the timeout expires, the request fails and returns an error. + * It can also be set to `-1` to indicate that the request should never timeout. */ master_timeout?: Duration /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { name?: never, master_timeout?: never } @@ -10020,7 +15095,7 @@ export interface CcrResumeAutoFollowPatternRequest extends RequestBase { export type CcrResumeAutoFollowPatternResponse = AcknowledgedResponseBase export interface CcrResumeFollowRequest extends RequestBase { -/** The name of the follow index to resume following. */ + /** The name of the follow index to resume following. */ index: IndexName /** Period to wait for a connection to the master node. */ master_timeout?: Duration @@ -10044,9 +15119,14 @@ export type CcrResumeFollowResponse = AcknowledgedResponseBase export interface CcrStatsAutoFollowStats { auto_followed_clusters: CcrStatsAutoFollowedCluster[] + /** The number of indices that the auto-follow coordinator failed to automatically follow. + * The causes of recent failures are captured in the logs of the elected master node and in the `auto_follow_stats.recent_auto_follow_errors` field. */ number_of_failed_follow_indices: long + /** The number of times that the auto-follow coordinator failed to retrieve the cluster state from a remote cluster registered in a collection of auto-follow patterns. */ number_of_failed_remote_cluster_state_requests: long + /** The number of indices that the auto-follow coordinator successfully followed. */ number_of_successful_follow_indices: long + /** An array of objects representing failures by the auto-follow coordinator. */ recent_auto_follow_errors: ErrorCause[] } @@ -10061,7 +15141,9 @@ export interface CcrStatsFollowStats { } export interface CcrStatsRequest extends RequestBase { -/** The period to wait for a connection to the master node. If the master node is not available before the timeout expires, the request fails and returns an error. It can also be set to `-1` to indicate that the request should never timeout. */ + /** The period to wait for a connection to the master node. + * If the master node is not available before the timeout expires, the request fails and returns an error. + * It can also be set to `-1` to indicate that the request should never timeout. */ master_timeout?: Duration /** The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration @@ -10072,14 +15154,18 @@ export interface CcrStatsRequest extends RequestBase { } export interface CcrStatsResponse { + /** Statistics for the auto-follow coordinator. */ auto_follow_stats: CcrStatsAutoFollowStats + /** Shard-level statistics for follower indices. */ follow_stats: CcrStatsFollowStats } export interface CcrUnfollowRequest extends RequestBase { -/** The name of the follower index. */ + /** The name of the follower index. */ index: IndexName - /** The period to wait for a connection to the master node. If the master node is not available before the timeout expires, the request fails and returns an error. It can also be set to `-1` to indicate that the request should never timeout. */ + /** The period to wait for a connection to the master node. 
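// Illustrative sketch, not part of the diff: creating an auto-follow pattern collection and pausing
// and resuming it. The pattern names and index patterns are assumptions for the example only.
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

async function manageAutoFollow() {
  await client.ccr.putAutoFollowPattern({
    name: 'logs-pattern',
    remote_cluster: 'leader',
    leader_index_patterns: ['logs-*'],
    leader_index_exclusion_patterns: ['logs-internal-*'],
    follow_index_pattern: '{{leader_index}}-copy'
  })
  await client.ccr.pauseAutoFollowPattern({ name: 'logs-pattern' })  // stop auto-following new indices
  await client.ccr.resumeAutoFollowPattern({ name: 'logs-pattern' }) // pick it back up
}

manageAutoFollow().catch(console.error)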
+ * If the master node is not available before the timeout expires, the request fails and returns an error. + * It can also be set to `-1` to indicate that the request should never timeout. */ master_timeout?: Duration /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { index?: never, master_timeout?: never } @@ -10174,7 +15260,7 @@ export interface ClusterAllocationExplainNodeDiskUsage { } export interface ClusterAllocationExplainRequest extends RequestBase { -/** If true, returns information about disk usage and shard sizes. */ + /** If true, returns information about disk usage and shard sizes. */ include_disk_info?: boolean /** If true, returns YES decisions in explanation. */ include_yes_decisions?: boolean @@ -10242,11 +15328,13 @@ export interface ClusterAllocationExplainUnassignedInformation { export type ClusterAllocationExplainUnassignedInformationReason = 'INDEX_CREATED' | 'CLUSTER_RECOVERED' | 'INDEX_REOPENED' | 'DANGLING_INDEX_IMPORTED' | 'NEW_INDEX_RESTORED' | 'EXISTING_INDEX_RESTORED' | 'REPLICA_ADDED' | 'ALLOCATION_FAILED' | 'NODE_LEFT' | 'REROUTE_CANCELLED' | 'REINITIALIZED' | 'REALLOCATED_REPLICA' | 'PRIMARY_FAILED' | 'FORCED_EMPTY_PRIMARY' | 'MANUAL_ALLOCATION' export interface ClusterDeleteComponentTemplateRequest extends RequestBase { -/** Comma-separated list or wildcard expression of component template names used to limit the request. */ + /** Comma-separated list or wildcard expression of component template names used to limit the request. */ name: Names - /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ + /** Period to wait for a connection to the master node. + * If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration - /** Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ + /** Period to wait for a response. + * If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { name?: never, master_timeout?: never, timeout?: never } @@ -10257,9 +15345,14 @@ export interface ClusterDeleteComponentTemplateRequest extends RequestBase { export type ClusterDeleteComponentTemplateResponse = AcknowledgedResponseBase export interface ClusterDeleteVotingConfigExclusionsRequest extends RequestBase { -/** Period to wait for a connection to the master node. */ + /** Period to wait for a connection to the master node. */ master_timeout?: Duration - /** Specifies whether to wait for all excluded nodes to be removed from the cluster before clearing the voting configuration exclusions list. Defaults to true, meaning that all excluded nodes must be removed from the cluster before this API takes any action. If set to false then the voting configuration exclusions list is cleared even if some excluded nodes are still in the cluster. */ + /** Specifies whether to wait for all excluded nodes to be removed from the + * cluster before clearing the voting configuration exclusions list. + * Defaults to true, meaning that all excluded nodes must be removed from + * the cluster before this API takes any action. 
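// Illustrative sketch, not part of the diff: CcrStatsResponse splits cluster-wide CCR statistics into
// auto-follow coordinator stats and per-shard follow stats, as documented above.
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

client.ccr.stats({ timeout: '30s' }).then(resp => {
  const auto = resp.auto_follow_stats
  console.log('followed ok:', auto.number_of_successful_follow_indices)
  console.log('follow failures:', auto.number_of_failed_follow_indices)
  console.log('recent errors:', auto.recent_auto_follow_errors.length)
}).catch(console.error)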
If set to false then the + * voting configuration exclusions list is cleared even if some excluded + * nodes are still in the cluster. */ wait_for_removal?: boolean /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { master_timeout?: never, wait_for_removal?: never } @@ -10270,11 +15363,15 @@ export interface ClusterDeleteVotingConfigExclusionsRequest extends RequestBase export type ClusterDeleteVotingConfigExclusionsResponse = boolean export interface ClusterExistsComponentTemplateRequest extends RequestBase { -/** Comma-separated list of component template names used to limit the request. Wildcard (*) expressions are supported. */ + /** Comma-separated list of component template names used to limit the request. + * Wildcard (*) expressions are supported. */ name: Names - /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ + /** Period to wait for a connection to the master node. If no response is + * received before the timeout expires, the request fails and returns an + * error. */ master_timeout?: Duration - /** If true, the request retrieves information from the local node only. Defaults to false, which means information is retrieved from the master node. */ + /** If true, the request retrieves information from the local node only. + * Defaults to false, which means information is retrieved from the master node. */ local?: boolean /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { name?: never, master_timeout?: never, local?: never } @@ -10285,15 +15382,18 @@ export interface ClusterExistsComponentTemplateRequest extends RequestBase { export type ClusterExistsComponentTemplateResponse = boolean export interface ClusterGetComponentTemplateRequest extends RequestBase { -/** Comma-separated list of component template names used to limit the request. Wildcard (`*`) expressions are supported. */ + /** Comma-separated list of component template names used to limit the request. + * Wildcard (`*`) expressions are supported. */ name?: Name /** If `true`, returns settings in flat format. */ flat_settings?: boolean /** Return all default configurations for the component template (default: false) */ include_defaults?: boolean - /** If `true`, the request retrieves information from the local node only. If `false`, information is retrieved from the master node. */ + /** If `true`, the request retrieves information from the local node only. + * If `false`, information is retrieved from the master node. */ local?: boolean - /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ + /** Period to wait for a connection to the master node. + * If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { name?: never, flat_settings?: never, include_defaults?: never, local?: never, master_timeout?: never } @@ -10306,13 +15406,15 @@ export interface ClusterGetComponentTemplateResponse { } export interface ClusterGetSettingsRequest extends RequestBase { -/** If `true`, returns settings in flat format. */ + /** If `true`, returns settings in flat format. 
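// Illustrative sketch, not part of the diff: CcrUnfollowRequest converts a follower back into a
// regular index. Following must be paused and the index closed first, so a typical sequence is the
// one below; the index name is an assumption.
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

async function unfollow(index: string) {
  await client.ccr.pauseFollow({ index })
  await client.indices.close({ index })
  await client.ccr.unfollow({ index })
  await client.indices.open({ index })  // reopen as a normal, writable index
}

unfollow('follower-logs').catch(console.error)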
*/ flat_settings?: boolean /** If `true`, returns default cluster settings from the local node. */ include_defaults?: boolean - /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ + /** Period to wait for a connection to the master node. + * If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration - /** Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ + /** Period to wait for a response. + * If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { flat_settings?: never, include_defaults?: never, master_timeout?: never, timeout?: never } @@ -10327,23 +15429,41 @@ export interface ClusterGetSettingsResponse { } export interface ClusterHealthHealthResponseBody { + /** The number of active primary shards. */ active_primary_shards: integer + /** The total number of active primary and replica shards. */ active_shards: integer - active_shards_percent_as_number: Percentage + /** The ratio of active shards in the cluster expressed as a string formatted percentage. */ + active_shards_percent?: string + /** The ratio of active shards in the cluster expressed as a percentage. */ + active_shards_percent_as_number: double + /** The name of the cluster. */ cluster_name: Name + /** The number of shards whose allocation has been delayed by the timeout settings. */ delayed_unassigned_shards: integer indices?: Record + /** The number of shards that are under initialization. */ initializing_shards: integer + /** The number of nodes that are dedicated data nodes. */ number_of_data_nodes: integer + /** The number of unfinished fetches. */ number_of_in_flight_fetch: integer + /** The number of nodes within the cluster. */ number_of_nodes: integer + /** The number of cluster-level changes that have not yet been executed. */ number_of_pending_tasks: integer + /** The number of shards that are under relocation. */ relocating_shards: integer status: HealthStatus + /** The time since the earliest initiated task is waiting for being performed. */ task_max_waiting_in_queue?: Duration + /** The time expressed in milliseconds since the earliest initiated task is waiting for being performed. */ task_max_waiting_in_queue_millis: DurationValue + /** If false the response returned within the period of time that is specified by the timeout parameter (30s by default) */ timed_out: boolean + /** The number of primary shards that are not allocated. */ unassigned_primary_shards: integer + /** The number of shards that are not allocated. */ unassigned_shards: integer } @@ -10361,7 +15481,7 @@ export interface ClusterHealthIndexHealthStats { } export interface ClusterHealthRequest extends RequestBase { -/** Comma-separated list of data streams, indices, and index aliases used to limit the request. Wildcard expressions (`*`) are supported. To target all data streams and indices in a cluster, omit this parameter or use _all or `*`. */ + /** Comma-separated list of data streams, indices, and index aliases used to limit the request. Wildcard expressions (`*`) are supported. To target all data streams and indices in a cluster, omit this parameter or use _all or `*`. 
*/ index?: Indices /** Whether to expand wildcard expression to concrete indices that are open, closed or both. */ expand_wildcards?: ExpandWildcards @@ -10378,7 +15498,7 @@ export interface ClusterHealthRequest extends RequestBase { /** Can be one of immediate, urgent, high, normal, low, languid. Wait until all currently queued events with the given priority are processed. */ wait_for_events?: WaitForEvents /** The request waits until the specified number N of nodes is available. It also accepts >=N, <=N, >N and `URGENT` > `HIGH` > `NORMAL` > `LOW` > `LANGUID`. */ priority: string + /** A general description of the cluster task that may include a reason and origin. */ source: string + /** The time since the task is waiting for being performed. */ time_in_queue?: Duration + /** The time expressed in milliseconds since the task is waiting for being performed. */ time_in_queue_millis: DurationValue } export interface ClusterPendingTasksRequest extends RequestBase { -/** If `true`, the request retrieves information from the local node only. If `false`, information is retrieved from the master node. */ + /** If `true`, the request retrieves information from the local node only. + * If `false`, information is retrieved from the master node. */ local?: boolean - /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ + /** Period to wait for a connection to the master node. + * If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { local?: never, master_timeout?: never } @@ -10445,13 +15576,18 @@ export interface ClusterPendingTasksResponse { } export interface ClusterPostVotingConfigExclusionsRequest extends RequestBase { -/** A comma-separated list of the names of the nodes to exclude from the voting configuration. If specified, you may not also specify node_ids. */ + /** A comma-separated list of the names of the nodes to exclude from the + * voting configuration. If specified, you may not also specify node_ids. */ node_names?: Names - /** A comma-separated list of the persistent ids of the nodes to exclude from the voting configuration. If specified, you may not also specify node_names. */ + /** A comma-separated list of the persistent ids of the nodes to exclude + * from the voting configuration. If specified, you may not also specify node_names. */ node_ids?: Ids /** Period to wait for a connection to the master node. */ master_timeout?: Duration - /** When adding a voting configuration exclusion, the API waits for the specified nodes to be excluded from the voting configuration before returning. If the timeout expires before the appropriate condition is satisfied, the request fails and returns an error. */ + /** When adding a voting configuration exclusion, the API waits for the + * specified nodes to be excluded from the voting configuration before + * returning. If the timeout expires before the appropriate condition + * is satisfied, the request fails and returns an error. */ timeout?: Duration /** All values in `body` will be added to the request body. 
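// Illustrative sketch, not part of the diff: reading the health response body documented above,
// including the numeric `active_shards_percent_as_number`. The index pattern, the `wait_for_status`
// target, and the timeout are assumptions for the example.
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

client.cluster.health({
  index: 'logs-*',           // limit the check to matching data streams and indices
  wait_for_status: 'yellow',
  timeout: '30s'
}).then(resp => {
  console.log(resp.status, resp.number_of_nodes, `${resp.active_shards_percent_as_number}% shards active`)
}).catch(console.error)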
*/ body?: string | { [key: string]: any } & { node_names?: never, node_ids?: never, master_timeout?: never, timeout?: never } @@ -10462,19 +15598,30 @@ export interface ClusterPostVotingConfigExclusionsRequest extends RequestBase { export type ClusterPostVotingConfigExclusionsResponse = boolean export interface ClusterPutComponentTemplateRequest extends RequestBase { -/** Name of the component template to create. Elasticsearch includes the following built-in component templates: `logs-mappings`; `logs-settings`; `metrics-mappings`; `metrics-settings`;`synthetics-mapping`; `synthetics-settings`. Elastic Agent uses these templates to configure backing indices for its data streams. If you use Elastic Agent and want to overwrite one of these templates, set the `version` for your replacement template higher than the current version. If you don’t use Elastic Agent and want to disable all built-in component and index templates, set `stack.templates.enabled` to `false` using the cluster update settings API. */ + /** Name of the component template to create. + * Elasticsearch includes the following built-in component templates: `logs-mappings`; `logs-settings`; `metrics-mappings`; `metrics-settings`;`synthetics-mapping`; `synthetics-settings`. + * Elastic Agent uses these templates to configure backing indices for its data streams. + * If you use Elastic Agent and want to overwrite one of these templates, set the `version` for your replacement template higher than the current version. + * If you don’t use Elastic Agent and want to disable all built-in component and index templates, set `stack.templates.enabled` to `false` using the cluster update settings API. */ name: Name /** If `true`, this request cannot replace or update existing component templates. */ create?: boolean - /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ + /** Period to wait for a connection to the master node. + * If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration /** The template to be applied which includes mappings, settings, or aliases configuration. */ template: IndicesIndexState - /** Version number used to manage component templates externally. This number isn't automatically generated or incremented by Elasticsearch. To unset a version, replace the template without specifying a version. */ + /** Version number used to manage component templates externally. + * This number isn't automatically generated or incremented by Elasticsearch. + * To unset a version, replace the template without specifying a version. */ version?: VersionNumber - /** Optional user metadata about the component template. It may have any contents. This map is not automatically generated by Elasticsearch. This information is stored in the cluster state, so keeping it short is preferable. To unset `_meta`, replace the template without specifying this information. */ + /** Optional user metadata about the component template. + * It may have any contents. This map is not automatically generated by Elasticsearch. + * This information is stored in the cluster state, so keeping it short is preferable. + * To unset `_meta`, replace the template without specifying this information. */ _meta?: Metadata - /** Marks this index template as deprecated. When creating or updating a non-deprecated index template that uses deprecated components, Elasticsearch will emit a deprecation warning. 
*/ + /** Marks this index template as deprecated. When creating or updating a non-deprecated index template + * that uses deprecated components, Elasticsearch will emit a deprecation warning. */ deprecated?: boolean /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { name?: never, create?: never, master_timeout?: never, template?: never, version?: never, _meta?: never, deprecated?: never } @@ -10485,7 +15632,7 @@ export interface ClusterPutComponentTemplateRequest extends RequestBase { export type ClusterPutComponentTemplateResponse = AcknowledgedResponseBase export interface ClusterPutSettingsRequest extends RequestBase { -/** Return settings in flat format (default: false) */ + /** Return settings in flat format (default: false) */ flat_settings?: boolean /** Explicit operation timeout for connection to master node */ master_timeout?: Duration @@ -10508,24 +15655,43 @@ export interface ClusterPutSettingsResponse { export type ClusterRemoteInfoClusterRemoteInfo = ClusterRemoteInfoClusterRemoteSniffInfo | ClusterRemoteInfoClusterRemoteProxyInfo export interface ClusterRemoteInfoClusterRemoteProxyInfo { + /** The connection mode for the remote cluster. */ mode: 'proxy' + /** If it is `true`, there is at least one open connection to the remote cluster. + * If it is `false`, it means that the cluster no longer has an open connection to the remote cluster. + * It does not necessarily mean that the remote cluster is down or unavailable, just that at some point a connection was lost. */ connected: boolean + /** The initial connect timeout for remote cluster connections. */ initial_connect_timeout: Duration + /** If `true`, cross-cluster search skips the remote cluster when its nodes are unavailable during the search and ignores errors returned by the remote cluster. */ skip_unavailable: boolean + /** The address for remote connections when proxy mode is configured. */ proxy_address: string server_name: string + /** The number of open socket connections to the remote cluster when proxy mode is configured. */ num_proxy_sockets_connected: integer + /** The maximum number of socket connections to the remote cluster when proxy mode is configured. */ max_proxy_socket_connections: integer + /** This field is present and has a value of `::es_redacted::` only when the remote cluster is configured with the API key based model. Otherwise, the field is not present. */ cluster_credentials?: string } export interface ClusterRemoteInfoClusterRemoteSniffInfo { + /** The connection mode for the remote cluster. */ mode: 'sniff' + /** If it is `true`, there is at least one open connection to the remote cluster. + * If it is `false`, it means that the cluster no longer has an open connection to the remote cluster. + * It does not necessarily mean that the remote cluster is down or unavailable, just that at some point a connection was lost. */ connected: boolean + /** The maximum number of connections maintained for the remote cluster when sniff mode is configured. */ max_connections_per_cluster: integer + /** The number of connected nodes in the remote cluster when sniff mode is configured. */ num_nodes_connected: long + /** The initial connect timeout for remote cluster connections. */ initial_connect_timeout: Duration + /** If `true`, cross-cluster search skips the remote cluster when its nodes are unavailable during the search and ignores errors returned by the remote cluster. 
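// Illustrative sketch, not part of the diff: ClusterPutComponentTemplateRequest carries the template,
// an externally managed `version`, and free-form `_meta`, as described above. The template name and
// its contents are assumptions.
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

async function upsertComponentTemplate() {
  await client.cluster.putComponentTemplate({
    name: 'logs-settings-custom',
    template: {
      settings: { number_of_shards: 1, number_of_replicas: 1 }
    },
    version: 3,                        // managed externally, never auto-incremented
    _meta: { owner: 'platform-team' }  // kept short, since it is stored in cluster state
  })
  const { component_templates } = await client.cluster.getComponentTemplate({ name: 'logs-settings-custom' })
  console.log(component_templates[0].name)
}

upsertComponentTemplate().catch(console.error)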
*/ skip_unavailable: boolean + /** The initial seed transport addresses of the remote cluster when sniff mode is configured. */ seeds: string[] } @@ -10539,10 +15705,15 @@ export interface ClusterRemoteInfoRequest extends RequestBase { export type ClusterRemoteInfoResponse = Record export interface ClusterRerouteCommand { + /** Cancel allocation of a shard (or recovery). Accepts index and shard for index name and shard number, and node for the node to cancel the shard allocation on. This can be used to force resynchronization of existing replicas from the primary shard by cancelling them and allowing them to be reinitialized through the standard recovery process. By default only replica shard allocations can be cancelled. If it is necessary to cancel the allocation of a primary shard then the allow_primary flag must also be included in the request. */ cancel?: ClusterRerouteCommandCancelAction + /** Move a started shard from one node to another node. Accepts index and shard for index name and shard number, from_node for the node to move the shard from, and to_node for the node to move the shard to. */ move?: ClusterRerouteCommandMoveAction + /** Allocate an unassigned replica shard to a node. Accepts index and shard for index name and shard number, and node to allocate the shard to. Takes allocation deciders into account. */ allocate_replica?: ClusterRerouteCommandAllocateReplicaAction + /** Allocate a primary shard to a node that holds a stale copy. Accepts the index and shard for index name and shard number, and node to allocate the shard to. Using this command may lead to data loss for the provided shard id. If a node which has the good copy of the data rejoins the cluster later on, that data will be deleted or overwritten with the data of the stale copy that was forcefully allocated with this command. To ensure that these implications are well-understood, this command requires the flag accept_data_loss to be explicitly set to true. */ allocate_stale_primary?: ClusterRerouteCommandAllocatePrimaryAction + /** Allocate an empty primary shard to a node. Accepts the index and shard for index name and shard number, and node to allocate the shard to. Using this command leads to a complete loss of all data that was indexed into this shard, if it was previously started. If a node which has a copy of the data rejoins the cluster later on, that data will be deleted. To ensure that these implications are well-understood, this command requires the flag accept_data_loss to be explicitly set to true. */ allocate_empty_primary?: ClusterRerouteCommandAllocatePrimaryAction } @@ -10550,6 +15721,7 @@ export interface ClusterRerouteCommandAllocatePrimaryAction { index: IndexName shard: integer node: string + /** If a node which has a copy of the data rejoins the cluster later on, that data will be deleted. To ensure that these implications are well-understood, this command requires the flag accept_data_loss to be explicitly set to true */ accept_data_loss: boolean } @@ -10569,12 +15741,15 @@ export interface ClusterRerouteCommandCancelAction { export interface ClusterRerouteCommandMoveAction { index: IndexName shard: integer + /** The node to move the shard from */ from_node: string + /** The node to move the shard to */ to_node: string } export interface ClusterRerouteRequest extends RequestBase { -/** If true, then the request simulates the operation. 
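// Illustrative sketch, not part of the diff: the remote info response is a map of cluster alias to
// either the sniff-mode or proxy-mode shape documented above, discriminated by `mode`.
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

client.cluster.remoteInfo().then(resp => {
  for (const [alias, info] of Object.entries(resp)) {
    if (info.mode === 'sniff') {
      console.log(alias, 'sniff', info.num_nodes_connected, 'of', info.max_connections_per_cluster)
    } else {
      console.log(alias, 'proxy', info.proxy_address, info.connected)
    }
  }
}).catch(console.error)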
It will calculate the result of applying the commands to the current cluster state and return the resulting cluster state after the commands (and rebalancing) have been applied; it will not actually perform the requested changes. */ + /** If true, then the request simulates the operation. + * It will calculate the result of applying the commands to the current cluster state and return the resulting cluster state after the commands (and rebalancing) have been applied; it will not actually perform the requested changes. */ dry_run?: boolean /** If true, then the response contains an explanation of why the commands can or cannot run. */ explain?: boolean @@ -10618,11 +15793,14 @@ export interface ClusterRerouteRerouteParameters { export interface ClusterRerouteResponse { acknowledged: boolean explanations?: ClusterRerouteRerouteExplanation[] + /** There aren't any guarantees on the output/structure of the raw cluster state. + * Here you will find the internal representation of the cluster, which can + * differ from the external representation. */ state?: any } export interface ClusterStateRequest extends RequestBase { -/** Limit the information returned to the specified metrics */ + /** Limit the information returned to the specified metrics */ metric?: Metrics /** A comma-separated list of index names; use `_all` or empty string to perform the operation on all indices */ index?: Indices @@ -10651,46 +15829,77 @@ export interface ClusterStateRequest extends RequestBase { export type ClusterStateResponse = any export interface ClusterStatsCharFilterTypes { + /** Contains statistics about analyzer types used in selected nodes. */ analyzer_types: ClusterStatsFieldTypes[] + /** Contains statistics about built-in analyzers used in selected nodes. */ built_in_analyzers: ClusterStatsFieldTypes[] + /** Contains statistics about built-in character filters used in selected nodes. */ built_in_char_filters: ClusterStatsFieldTypes[] + /** Contains statistics about built-in token filters used in selected nodes. */ built_in_filters: ClusterStatsFieldTypes[] + /** Contains statistics about built-in tokenizers used in selected nodes. */ built_in_tokenizers: ClusterStatsFieldTypes[] + /** Contains statistics about character filter types used in selected nodes. */ char_filter_types: ClusterStatsFieldTypes[] + /** Contains statistics about token filter types used in selected nodes. */ filter_types: ClusterStatsFieldTypes[] + /** Contains statistics about tokenizer types used in selected nodes. */ tokenizer_types: ClusterStatsFieldTypes[] } export interface ClusterStatsClusterFileSystem { + /** Total number of bytes available to JVM in file stores across all selected nodes. + * Depending on operating system or process-level restrictions, this number may be less than `nodes.fs.free_in_byes`. + * This is the actual amount of free disk space the selected Elasticsearch nodes can use. */ available_in_bytes: long + /** Total number of unallocated bytes in file stores across all selected nodes. */ free_in_bytes: long + /** Total size, in bytes, of all file stores across all selected nodes. */ total_in_bytes: long } export interface ClusterStatsClusterIndices { + /** Contains statistics about analyzers and analyzer components used in selected nodes. */ analysis: ClusterStatsCharFilterTypes + /** Contains statistics about memory used for completion in selected nodes. */ completion: CompletionStats + /** Total number of indices with shards assigned to selected nodes. 
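// Illustrative sketch, not part of the diff: a dry-run reroute using the `move` command documented
// above. The index and node names are assumptions for the example.
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

client.cluster.reroute({
  dry_run: true,  // only simulate, returning the resulting cluster state
  explain: true,
  commands: [
    { move: { index: 'logs-000001', shard: 0, from_node: 'node-1', to_node: 'node-2' } }
  ]
}).then(resp => console.log(resp.acknowledged, resp.explanations)).catch(console.error)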
*/ count: long + /** Contains counts for documents in selected nodes. */ docs: DocStats + /** Contains statistics about the field data cache of selected nodes. */ fielddata: FielddataStats + /** Contains statistics about the query cache of selected nodes. */ query_cache: QueryCacheStats + /** Contains statistics about segments in selected nodes. */ segments: SegmentsStats + /** Contains statistics about indices with shards assigned to selected nodes. */ shards: ClusterStatsClusterIndicesShards + /** Contains statistics about the size of shards assigned to selected nodes. */ store: StoreStats + /** Contains statistics about field mappings in selected nodes. */ mappings: ClusterStatsFieldTypesMappings + /** Contains statistics about analyzers and analyzer components used in selected nodes. */ versions?: ClusterStatsIndicesVersions[] } export interface ClusterStatsClusterIndicesShards { + /** Contains statistics about shards assigned to selected nodes. */ index?: ClusterStatsClusterIndicesShardsIndex + /** Number of primary shards assigned to selected nodes. */ primaries?: double + /** Ratio of replica shards to primary shards across all selected nodes. */ replication?: double + /** Total number of shards assigned to selected nodes. */ total?: double } export interface ClusterStatsClusterIndicesShardsIndex { + /** Contains statistics about the number of primary shards assigned to selected nodes. */ primaries: ClusterStatsClusterShardMetrics + /** Contains statistics about the number of replication shards assigned to selected nodes. */ replication: ClusterStatsClusterShardMetrics + /** Contains statistics about the number of shards assigned to selected nodes. */ shards: ClusterStatsClusterShardMetrics } @@ -10700,29 +15909,45 @@ export interface ClusterStatsClusterIngest { } export interface ClusterStatsClusterJvm { + /** Uptime duration, in milliseconds, since JVM last started. */ max_uptime_in_millis: DurationValue + /** Contains statistics about memory used by selected nodes. */ mem: ClusterStatsClusterJvmMemory + /** Number of active threads in use by JVM across all selected nodes. */ threads: long + /** Contains statistics about the JVM versions used by selected nodes. */ versions: ClusterStatsClusterJvmVersion[] } export interface ClusterStatsClusterJvmMemory { + /** Maximum amount of memory, in bytes, available for use by the heap across all selected nodes. */ heap_max_in_bytes: long + /** Memory, in bytes, currently in use by the heap across all selected nodes. */ heap_used_in_bytes: long } export interface ClusterStatsClusterJvmVersion { + /** Always `true`. All distributions come with a bundled Java Development Kit (JDK). */ bundled_jdk: boolean + /** Total number of selected nodes using JVM. */ count: integer + /** If `true`, a bundled JDK is in use by JVM. */ using_bundled_jdk: boolean + /** Version of JVM used by one or more selected nodes. */ version: VersionString + /** Name of the JVM. */ vm_name: string + /** Vendor of the JVM. */ vm_vendor: string + /** Full version number of JVM. + * The full version number includes a plus sign (+) followed by the build number. */ vm_version: VersionString } export interface ClusterStatsClusterNetworkTypes { + /** Contains statistics about the HTTP network types used by selected nodes. */ http_types: Record + /** Contains statistics about the transport network types used by selected nodes. 
*/ transport_types: Record } @@ -10744,56 +15969,91 @@ export interface ClusterStatsClusterNodeCount { } export interface ClusterStatsClusterNodes { + /** Contains counts for nodes selected by the request’s node filters. */ count: ClusterStatsClusterNodeCount + /** Contains statistics about the discovery types used by selected nodes. */ discovery_types: Record + /** Contains statistics about file stores by selected nodes. */ fs: ClusterStatsClusterFileSystem indexing_pressure: ClusterStatsIndexingPressure ingest: ClusterStatsClusterIngest + /** Contains statistics about the Java Virtual Machines (JVMs) used by selected nodes. */ jvm: ClusterStatsClusterJvm + /** Contains statistics about the transport and HTTP networks used by selected nodes. */ network_types: ClusterStatsClusterNetworkTypes + /** Contains statistics about the operating systems used by selected nodes. */ os: ClusterStatsClusterOperatingSystem + /** Contains statistics about Elasticsearch distributions installed on selected nodes. */ packaging_types: ClusterStatsNodePackagingType[] + /** Contains statistics about installed plugins and modules by selected nodes. + * If no plugins or modules are installed, this array is empty. */ plugins: PluginStats[] + /** Contains statistics about processes used by selected nodes. */ process: ClusterStatsClusterProcess + /** Array of Elasticsearch versions used on selected nodes. */ versions: VersionString[] } export interface ClusterStatsClusterOperatingSystem { + /** Number of processors used to calculate thread pool size across all selected nodes. + * This number can be set with the processors setting of a node and defaults to the number of processors reported by the operating system. + * In both cases, this number will never be larger than 32. */ allocated_processors: integer + /** Contains statistics about processor architectures (for example, x86_64 or aarch64) used by selected nodes. */ architectures?: ClusterStatsClusterOperatingSystemArchitecture[] + /** Number of processors available to JVM across all selected nodes. */ available_processors: integer + /** Contains statistics about memory used by selected nodes. */ mem: ClusterStatsOperatingSystemMemoryInfo + /** Contains statistics about operating systems used by selected nodes. */ names: ClusterStatsClusterOperatingSystemName[] + /** Contains statistics about operating systems used by selected nodes. */ pretty_names: ClusterStatsClusterOperatingSystemPrettyName[] } export interface ClusterStatsClusterOperatingSystemArchitecture { + /** Name of an architecture used by one or more selected nodes. */ arch: string + /** Number of selected nodes using the architecture. */ count: integer } export interface ClusterStatsClusterOperatingSystemName { + /** Number of selected nodes using the operating system. */ count: integer + /** Name of an operating system used by one or more selected nodes. */ name: Name } export interface ClusterStatsClusterOperatingSystemPrettyName { + /** Number of selected nodes using the operating system. */ count: integer + /** Human-readable name of an operating system used by one or more selected nodes. */ pretty_name: Name } export interface ClusterStatsClusterProcess { + /** Contains statistics about CPU used by selected nodes. */ cpu: ClusterStatsClusterProcessCpu + /** Contains statistics about open file descriptors in selected nodes. 
*/ open_file_descriptors: ClusterStatsClusterProcessOpenFileDescriptors } export interface ClusterStatsClusterProcessCpu { + /** Percentage of CPU used across all selected nodes. + * Returns `-1` if not supported. */ percent: integer } export interface ClusterStatsClusterProcessOpenFileDescriptors { + /** Average number of concurrently open file descriptors. + * Returns `-1` if not supported. */ avg: long + /** Maximum number of concurrently open file descriptors allowed across all selected nodes. + * Returns `-1` if not supported. */ max: long + /** Minimum number of concurrently open file descriptors across all selected nodes. + * Returns -1 if not supported. */ min: long } @@ -10806,27 +16066,43 @@ export interface ClusterStatsClusterProcessor { } export interface ClusterStatsClusterShardMetrics { + /** Mean number of shards in an index, counting only shards assigned to selected nodes. */ avg: double + /** Maximum number of shards in an index, counting only shards assigned to selected nodes. */ max: double + /** Minimum number of shards in an index, counting only shards assigned to selected nodes. */ min: double } export interface ClusterStatsFieldTypes { + /** The name for the field type in selected nodes. */ name: Name + /** The number of occurrences of the field type in selected nodes. */ count: integer + /** The number of indices containing the field type in selected nodes. */ index_count: integer + /** For dense_vector field types, number of indexed vector types in selected nodes. */ indexed_vector_count?: long + /** For dense_vector field types, the maximum dimension of all indexed vector types in selected nodes. */ indexed_vector_dim_max?: long + /** For dense_vector field types, the minimum dimension of all indexed vector types in selected nodes. */ indexed_vector_dim_min?: long + /** The number of fields that declare a script. */ script_count?: integer } export interface ClusterStatsFieldTypesMappings { + /** Contains statistics about field data types used in selected nodes. */ field_types: ClusterStatsFieldTypes[] + /** Contains statistics about runtime field data types used in selected nodes. */ runtime_field_types?: ClusterStatsRuntimeFieldTypes[] + /** Total number of fields in all non-system indices. */ total_field_count?: integer + /** Total number of fields in all non-system indices, accounting for mapping deduplication. */ total_deduplicated_field_count?: integer + /** Total size of all mappings after deduplication and compression. */ total_deduplicated_mapping_size?: ByteSize + /** Total size of all mappings, in bytes, after deduplication and compression. */ total_deduplicated_mapping_size_in_bytes?: long } @@ -10859,26 +16135,37 @@ export interface ClusterStatsIndicesVersions { } export interface ClusterStatsNodePackagingType { + /** Number of selected nodes using the distribution flavor and file type. */ count: integer + /** Type of Elasticsearch distribution. This is always `default`. */ flavor: string + /** File type (such as `tar` or `zip`) used for the distribution package. */ type: string } export interface ClusterStatsOperatingSystemMemoryInfo { + /** Total amount, in bytes, of memory across all selected nodes, but using the value specified using the `es.total_memory_bytes` system property instead of measured total memory for those nodes where that system property was set. */ adjusted_total_in_bytes?: long + /** Amount, in bytes, of free physical memory across all selected nodes. 
*/ free_in_bytes: long + /** Percentage of free physical memory across all selected nodes. */ free_percent: integer + /** Total amount, in bytes, of physical memory across all selected nodes. */ total_in_bytes: long + /** Amount, in bytes, of physical memory in use across all selected nodes. */ used_in_bytes: long + /** Percentage of physical memory in use across all selected nodes. */ used_percent: integer } export interface ClusterStatsRequest extends RequestBase { -/** Comma-separated list of node filters used to limit returned information. Defaults to all nodes in the cluster. */ + /** Comma-separated list of node filters used to limit returned information. Defaults to all nodes in the cluster. */ node_id?: NodeIds /** Include remote cluster data into the response */ include_remotes?: boolean - /** Period to wait for each node to respond. If a node does not respond before its timeout expires, the response does not include its stats. However, timed out nodes are included in the response’s `_nodes.failed` property. Defaults to no timeout. */ + /** Period to wait for each node to respond. + * If a node does not respond before its timeout expires, the response does not include its stats. + * However, timed out nodes are included in the response’s `_nodes.failed` property. Defaults to no timeout. */ timeout?: Duration /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { node_id?: never, include_remotes?: never, timeout?: never } @@ -10889,28 +16176,48 @@ export interface ClusterStatsRequest extends RequestBase { export type ClusterStatsResponse = ClusterStatsStatsResponseBase export interface ClusterStatsRuntimeFieldTypes { + /** Maximum number of characters for a single runtime field script. */ chars_max: integer + /** Total number of characters for the scripts that define the current runtime field data type. */ chars_total: integer + /** Number of runtime fields mapped to the field data type in selected nodes. */ count: integer + /** Maximum number of accesses to doc_values for a single runtime field script */ doc_max: integer + /** Total number of accesses to doc_values for the scripts that define the current runtime field data type. */ doc_total: integer + /** Number of indices containing a mapping of the runtime field data type in selected nodes. */ index_count: integer + /** Script languages used for the runtime fields scripts. */ lang: string[] + /** Maximum number of lines for a single runtime field script. */ lines_max: integer + /** Total number of lines for the scripts that define the current runtime field data type. */ lines_total: integer + /** Field data type used in selected nodes. */ name: Name + /** Number of runtime fields that don’t declare a script. */ scriptless_count: integer + /** Number of runtime fields that shadow an indexed field. */ shadowed_count: integer + /** Maximum number of accesses to _source for a single runtime field script. */ source_max: integer + /** Total number of accesses to _source for the scripts that define the current runtime field data type. */ source_total: integer } export interface ClusterStatsStatsResponseBase extends NodesNodesResponseBase { + /** Name of the cluster, based on the cluster name setting. */ cluster_name: Name + /** Unique identifier for the cluster. */ cluster_uuid: Uuid + /** Contains statistics about indices with shards assigned to selected nodes. */ indices: ClusterStatsClusterIndices + /** Contains statistics about nodes selected by the request’s node filters. 
*/ nodes: ClusterStatsClusterNodes + /** Health status of the cluster, based on the state of its primary and replica shards. */ status: HealthStatus + /** Unix timestamp, in milliseconds, for the last time the cluster statistics were refreshed. */ timestamp: long } @@ -10971,8 +16278,11 @@ export type ConnectorConnectorConfiguration = Record export interface ConnectorConnectorFeatures { + /** Indicates whether document-level security is enabled. */ document_level_security?: ConnectorFeatureEnabled + /** Indicates whether incremental syncs are enabled. */ incremental_sync?: ConnectorFeatureEnabled + /** Indicates whether managed connector API keys are enabled. */ native_connector_api_keys?: ConnectorFeatureEnabled sync_rules?: ConnectorSyncRulesFeature } @@ -10981,6 +16291,7 @@ export type ConnectorConnectorFieldType = 'str' | 'int' | 'list' | 'bool' export interface ConnectorConnectorScheduling { enabled: boolean + /** The interval is expressed using the crontab syntax */ interval: string } @@ -11138,7 +16449,9 @@ export type ConnectorSyncJobTriggerMethod = 'on_demand' | 'scheduled' export type ConnectorSyncJobType = 'full' | 'incremental' | 'access_control' export interface ConnectorSyncRulesFeature { + /** Indicates whether advanced sync rules are enabled. */ advanced?: ConnectorFeatureEnabled + /** Indicates whether basic sync rules are enabled. */ basic?: ConnectorFeatureEnabled } @@ -11147,7 +16460,7 @@ export type ConnectorSyncStatus = 'canceling' | 'canceled' | 'completed' | 'erro export type ConnectorValidation = ConnectorLessThanValidation | ConnectorGreaterThanValidation | ConnectorListTypeValidation | ConnectorIncludedInValidation | ConnectorRegexValidation export interface ConnectorCheckInRequest extends RequestBase { -/** The unique identifier of the connector to be checked in */ + /** The unique identifier of the connector to be checked in */ connector_id: Id /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { connector_id?: never } @@ -11160,7 +16473,7 @@ export interface ConnectorCheckInResponse { } export interface ConnectorDeleteRequest extends RequestBase { -/** The unique identifier of the connector to be deleted */ + /** The unique identifier of the connector to be deleted */ connector_id: Id /** A flag indicating if associated sync jobs should be also removed. Defaults to false. */ delete_sync_jobs?: boolean @@ -11175,7 +16488,7 @@ export interface ConnectorDeleteRequest extends RequestBase { export type ConnectorDeleteResponse = AcknowledgedResponseBase export interface ConnectorGetRequest extends RequestBase { -/** The unique identifier of the connector */ + /** The unique identifier of the connector */ connector_id: Id /** A flag to indicate if the desired connector should be fetched, even if it was soft-deleted. 
*/ include_deleted?: boolean @@ -11188,7 +16501,7 @@ export interface ConnectorGetRequest extends RequestBase { export type ConnectorGetResponse = ConnectorConnector export interface ConnectorLastSyncRequest extends RequestBase { -/** The unique identifier of the connector to be updated */ + /** The unique identifier of the connector to be updated */ connector_id: Id last_access_control_sync_error?: string last_access_control_sync_scheduled_at?: DateTime @@ -11213,7 +16526,7 @@ export interface ConnectorLastSyncResponse { } export interface ConnectorListRequest extends RequestBase { -/** Starting offset (default: 0) */ + /** Starting offset (default: 0) */ from?: integer /** Specifies a max number of results to get */ size?: integer @@ -11257,7 +16570,7 @@ export interface ConnectorPostResponse { } export interface ConnectorPutRequest extends RequestBase { -/** The unique identifier of the connector to be created or updated. ID is auto-generated if not provided. */ + /** The unique identifier of the connector to be created or updated. ID is auto-generated if not provided. */ connector_id?: Id description?: string index_name?: IndexName @@ -11277,7 +16590,7 @@ export interface ConnectorPutResponse { } export interface ConnectorSyncJobCancelRequest extends RequestBase { -/** The unique identifier of the connector sync job */ + /** The unique identifier of the connector sync job */ connector_sync_job_id: Id /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { connector_sync_job_id?: never } @@ -11290,7 +16603,7 @@ export interface ConnectorSyncJobCancelResponse { } export interface ConnectorSyncJobCheckInRequest extends RequestBase { -/** The unique identifier of the connector sync job to be checked in. */ + /** The unique identifier of the connector sync job to be checked in. */ connector_sync_job_id: Id /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { connector_sync_job_id?: never } @@ -11302,9 +16615,10 @@ export interface ConnectorSyncJobCheckInResponse { } export interface ConnectorSyncJobClaimRequest extends RequestBase { -/** The unique identifier of the connector sync job. */ + /** The unique identifier of the connector sync job. */ connector_sync_job_id: Id - /** The cursor object from the last incremental sync job. This should reference the `sync_cursor` field in the connector state for which the job runs. */ + /** The cursor object from the last incremental sync job. + * This should reference the `sync_cursor` field in the connector state for which the job runs. */ sync_cursor?: any /** The host name of the current system that will run the job. */ worker_hostname: string @@ -11318,7 +16632,7 @@ export interface ConnectorSyncJobClaimResponse { } export interface ConnectorSyncJobDeleteRequest extends RequestBase { -/** The unique identifier of the connector sync job to be deleted */ + /** The unique identifier of the connector sync job to be deleted */ connector_sync_job_id: Id /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { connector_sync_job_id?: never } @@ -11329,7 +16643,7 @@ export interface ConnectorSyncJobDeleteRequest extends RequestBase { export type ConnectorSyncJobDeleteResponse = AcknowledgedResponseBase export interface ConnectorSyncJobErrorRequest extends RequestBase { -/** The unique identifier for the connector sync job. */ + /** The unique identifier for the connector sync job. 
*/ connector_sync_job_id: Id /** The error for the connector sync job error field. */ error: string @@ -11343,7 +16657,7 @@ export interface ConnectorSyncJobErrorResponse { } export interface ConnectorSyncJobGetRequest extends RequestBase { -/** The unique identifier of the connector sync job */ + /** The unique identifier of the connector sync job */ connector_sync_job_id: Id /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { connector_sync_job_id?: never } @@ -11354,7 +16668,7 @@ export interface ConnectorSyncJobGetRequest extends RequestBase { export type ConnectorSyncJobGetResponse = ConnectorConnectorSyncJob export interface ConnectorSyncJobListRequest extends RequestBase { -/** Starting offset (default: 0) */ + /** Starting offset (default: 0) */ from?: integer /** Specifies a max number of results to get */ size?: integer @@ -11376,7 +16690,7 @@ export interface ConnectorSyncJobListResponse { } export interface ConnectorSyncJobPostRequest extends RequestBase { -/** The id of the associated connector */ + /** The id of the associated connector */ id: Id job_type?: ConnectorSyncJobType trigger_method?: ConnectorSyncJobTriggerMethod @@ -11391,7 +16705,7 @@ export interface ConnectorSyncJobPostResponse { } export interface ConnectorSyncJobUpdateStatsRequest extends RequestBase { -/** The unique identifier of the connector sync job. */ + /** The unique identifier of the connector sync job. */ connector_sync_job_id: Id /** The number of documents the sync job deleted. */ deleted_document_count: long @@ -11415,7 +16729,7 @@ export interface ConnectorSyncJobUpdateStatsResponse { } export interface ConnectorUpdateActiveFilteringRequest extends RequestBase { -/** The unique identifier of the connector to be updated */ + /** The unique identifier of the connector to be updated */ connector_id: Id /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { connector_id?: never } @@ -11428,7 +16742,7 @@ export interface ConnectorUpdateActiveFilteringResponse { } export interface ConnectorUpdateApiKeyIdRequest extends RequestBase { -/** The unique identifier of the connector to be updated */ + /** The unique identifier of the connector to be updated */ connector_id: Id api_key_id?: string api_key_secret_id?: string @@ -11443,7 +16757,7 @@ export interface ConnectorUpdateApiKeyIdResponse { } export interface ConnectorUpdateConfigurationRequest extends RequestBase { -/** The unique identifier of the connector to be updated */ + /** The unique identifier of the connector to be updated */ connector_id: Id configuration?: ConnectorConnectorConfiguration values?: Record @@ -11458,7 +16772,7 @@ export interface ConnectorUpdateConfigurationResponse { } export interface ConnectorUpdateErrorRequest extends RequestBase { -/** The unique identifier of the connector to be updated */ + /** The unique identifier of the connector to be updated */ connector_id: Id error: SpecUtilsWithNullValue /** All values in `body` will be added to the request body. */ @@ -11472,7 +16786,7 @@ export interface ConnectorUpdateErrorResponse { } export interface ConnectorUpdateFeaturesRequest extends RequestBase { -/** The unique identifier of the connector to be updated. */ + /** The unique identifier of the connector to be updated. */ connector_id: Id features: ConnectorConnectorFeatures /** All values in `body` will be added to the request body. 
*/ @@ -11486,7 +16800,7 @@ export interface ConnectorUpdateFeaturesResponse { } export interface ConnectorUpdateFilteringRequest extends RequestBase { -/** The unique identifier of the connector to be updated */ + /** The unique identifier of the connector to be updated */ connector_id: Id filtering?: ConnectorFilteringConfig[] rules?: ConnectorFilteringRule[] @@ -11502,7 +16816,7 @@ export interface ConnectorUpdateFilteringResponse { } export interface ConnectorUpdateFilteringValidationRequest extends RequestBase { -/** The unique identifier of the connector to be updated */ + /** The unique identifier of the connector to be updated */ connector_id: Id validation: ConnectorFilteringRulesValidation /** All values in `body` will be added to the request body. */ @@ -11516,7 +16830,7 @@ export interface ConnectorUpdateFilteringValidationResponse { } export interface ConnectorUpdateIndexNameRequest extends RequestBase { -/** The unique identifier of the connector to be updated */ + /** The unique identifier of the connector to be updated */ connector_id: Id index_name: SpecUtilsWithNullValue /** All values in `body` will be added to the request body. */ @@ -11530,7 +16844,7 @@ export interface ConnectorUpdateIndexNameResponse { } export interface ConnectorUpdateNameRequest extends RequestBase { -/** The unique identifier of the connector to be updated */ + /** The unique identifier of the connector to be updated */ connector_id: Id name?: string description?: string @@ -11545,7 +16859,7 @@ export interface ConnectorUpdateNameResponse { } export interface ConnectorUpdateNativeRequest extends RequestBase { -/** The unique identifier of the connector to be updated */ + /** The unique identifier of the connector to be updated */ connector_id: Id is_native: boolean /** All values in `body` will be added to the request body. */ @@ -11559,7 +16873,7 @@ export interface ConnectorUpdateNativeResponse { } export interface ConnectorUpdatePipelineRequest extends RequestBase { -/** The unique identifier of the connector to be updated */ + /** The unique identifier of the connector to be updated */ connector_id: Id pipeline: ConnectorIngestPipelineParams /** All values in `body` will be added to the request body. */ @@ -11573,7 +16887,7 @@ export interface ConnectorUpdatePipelineResponse { } export interface ConnectorUpdateSchedulingRequest extends RequestBase { -/** The unique identifier of the connector to be updated */ + /** The unique identifier of the connector to be updated */ connector_id: Id scheduling: ConnectorSchedulingConfiguration /** All values in `body` will be added to the request body. */ @@ -11587,7 +16901,7 @@ export interface ConnectorUpdateSchedulingResponse { } export interface ConnectorUpdateServiceTypeRequest extends RequestBase { -/** The unique identifier of the connector to be updated */ + /** The unique identifier of the connector to be updated */ connector_id: Id service_type: string /** All values in `body` will be added to the request body. */ @@ -11601,7 +16915,7 @@ export interface ConnectorUpdateServiceTypeResponse { } export interface ConnectorUpdateStatusRequest extends RequestBase { -/** The unique identifier of the connector to be updated */ + /** The unique identifier of the connector to be updated */ connector_id: Id status: ConnectorConnectorStatus /** All values in `body` will be added to the request body. 
*/ @@ -11615,7 +16929,7 @@ export interface ConnectorUpdateStatusResponse { } export interface DanglingIndicesDeleteDanglingIndexRequest extends RequestBase { -/** The UUID of the index to delete. Use the get dangling indices API to find the UUID. */ + /** The UUID of the index to delete. Use the get dangling indices API to find the UUID. */ index_uuid: Uuid /** This parameter must be set to true to acknowledge that it will no longer be possible to recover data from the dangling index. */ accept_data_loss: boolean @@ -11632,9 +16946,10 @@ export interface DanglingIndicesDeleteDanglingIndexRequest extends RequestBase { export type DanglingIndicesDeleteDanglingIndexResponse = AcknowledgedResponseBase export interface DanglingIndicesImportDanglingIndexRequest extends RequestBase { -/** The UUID of the index to import. Use the get dangling indices API to locate the UUID. */ + /** The UUID of the index to import. Use the get dangling indices API to locate the UUID. */ index_uuid: Uuid - /** This parameter must be set to true to import a dangling index. Because Elasticsearch cannot know where the dangling index data came from or determine which shard copies are fresh and which are stale, it cannot guarantee that the imported data represents the latest state of the index when it was last in the cluster. */ + /** This parameter must be set to true to import a dangling index. + * Because Elasticsearch cannot know where the dangling index data came from or determine which shard copies are fresh and which are stale, it cannot guarantee that the imported data represents the latest state of the index when it was last in the cluster. */ accept_data_loss: boolean /** Specify timeout for connection to master */ master_timeout?: Duration @@ -11682,7 +16997,7 @@ export interface EnrichSummary { } export interface EnrichDeletePolicyRequest extends RequestBase { -/** Enrich policy to delete. */ + /** Enrich policy to delete. */ name: Name /** Period to wait for a connection to the master node. */ master_timeout?: Duration @@ -11694,14 +17009,15 @@ export interface EnrichDeletePolicyRequest extends RequestBase { export type EnrichDeletePolicyResponse = AcknowledgedResponseBase -export type EnrichExecutePolicyEnrichPolicyPhase = 'SCHEDULED' | 'RUNNING' | 'COMPLETE' | 'FAILED' +export type EnrichExecutePolicyEnrichPolicyPhase = 'SCHEDULED' | 'RUNNING' | 'COMPLETE' | 'FAILED' | 'CANCELLED' export interface EnrichExecutePolicyExecuteEnrichPolicyStatus { phase: EnrichExecutePolicyEnrichPolicyPhase + step?: string } export interface EnrichExecutePolicyRequest extends RequestBase { -/** Enrich policy to execute. */ + /** Enrich policy to execute. */ name: Name /** Period to wait for a connection to the master node. */ master_timeout?: Duration @@ -11715,11 +17031,12 @@ export interface EnrichExecutePolicyRequest extends RequestBase { export interface EnrichExecutePolicyResponse { status?: EnrichExecutePolicyExecuteEnrichPolicyStatus - task_id?: TaskId + task?: TaskId } export interface EnrichGetPolicyRequest extends RequestBase { -/** Comma-separated list of enrich policy names used to limit the request. To return information for all enrich policies, omit this parameter. */ + /** Comma-separated list of enrich policy names used to limit the request. + * To return information for all enrich policies, omit this parameter. */ name?: Names /** Period to wait for a connection to the master node.
*/ master_timeout?: Duration @@ -11734,7 +17051,7 @@ export interface EnrichGetPolicyResponse { } export interface EnrichPutPolicyRequest extends RequestBase { -/** Name of the enrich policy to create or update. */ + /** Name of the enrich policy to create or update. */ name: Name /** Period to wait for a connection to the master node. */ master_timeout?: Duration @@ -11777,7 +17094,7 @@ export interface EnrichStatsExecutingPolicy { } export interface EnrichStatsRequest extends RequestBase { -/** Period to wait for a connection to the master node. */ + /** Period to wait for a connection to the master node. */ master_timeout?: Duration /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { master_timeout?: never } @@ -11786,42 +17103,63 @@ export interface EnrichStatsRequest extends RequestBase { } export interface EnrichStatsResponse { + /** Objects containing information about each coordinating ingest node for configured enrich processors. */ coordinator_stats: EnrichStatsCoordinatorStats[] + /** Objects containing information about each enrich policy that is currently executing. */ executing_policies: EnrichStatsExecutingPolicy[] + /** Objects containing information about the enrich cache stats on each ingest node. */ cache_stats?: EnrichStatsCacheStats[] } export interface EqlEqlHits { + /** Metadata about the number of matching events or sequences. */ total?: SearchTotalHits + /** Contains events matching the query. Each object represents a matching event. */ events?: EqlHitsEvent[] + /** Contains event sequences matching the query. Each object represents a matching sequence. This parameter is only returned for EQL queries containing a sequence. */ sequences?: EqlHitsSequence[] } export interface EqlEqlSearchResponseBase { + /** Identifier for the search. */ id?: Id + /** If true, the response does not contain complete search results. */ is_partial?: boolean + /** If true, the search request is still executing. */ is_running?: boolean + /** Milliseconds it took Elasticsearch to execute the request. */ took?: DurationValue + /** If true, the request timed out before completion. */ timed_out?: boolean + /** Contains matching events and sequences. Also contains related metadata. */ hits: EqlEqlHits + /** Contains information about shard failures (if any), in case allow_partial_search_results=true */ shard_failures?: ShardFailure[] } export interface EqlHitsEvent { + /** Name of the index containing the event. */ _index: IndexName + /** Unique identifier for the event. This ID is only unique within the index. */ _id: Id + /** Original JSON body passed for the event at index time. */ _source: TEvent + /** Set to `true` for events in a timespan-constrained sequence that do not meet a given condition. */ missing?: boolean fields?: Record } export interface EqlHitsSequence { + /** Contains events matching the query. Each object represents a matching event. */ events: EqlHitsEvent[] + /** Shared field values used to constrain matches in the sequence. These are defined using the by keyword in the EQL query syntax. */ join_keys?: any[] } export interface EqlDeleteRequest extends RequestBase { -/** Identifier for the search to delete. A search ID is provided in the EQL search API's response for an async search. A search ID is also provided if the request’s `keep_on_completion` parameter is `true`. */ + /** Identifier for the search to delete. + * A search ID is provided in the EQL search API's response for an async search. 
+ * A search ID is also provided if the request’s `keep_on_completion` parameter is `true`. */ id: Id /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { id?: never } @@ -11832,11 +17170,13 @@ export interface EqlDeleteRequest extends RequestBase { export type EqlDeleteResponse = AcknowledgedResponseBase export interface EqlGetRequest extends RequestBase { -/** Identifier for the search. */ + /** Identifier for the search. */ id: Id - /** Period for which the search and its results are stored on the cluster. Defaults to the keep_alive value set by the search’s EQL search API request. */ + /** Period for which the search and its results are stored on the cluster. + * Defaults to the keep_alive value set by the search’s EQL search API request. */ keep_alive?: Duration - /** Timeout duration to wait for the request to finish. Defaults to no timeout, meaning the request waits for complete search results. */ + /** Timeout duration to wait for the request to finish. + * Defaults to no timeout, meaning the request waits for complete search results. */ wait_for_completion_timeout?: Duration /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { id?: never, keep_alive?: never, wait_for_completion_timeout?: never } @@ -11847,7 +17187,7 @@ export interface EqlGetRequest extends RequestBase { export type EqlGetResponse = EqlEqlSearchResponseBase export interface EqlGetStatusRequest extends RequestBase { -/** Identifier for the search. */ + /** Identifier for the search. */ id: Id /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { id?: never } @@ -11856,16 +17196,22 @@ export interface EqlGetStatusRequest extends RequestBase { } export interface EqlGetStatusResponse { + /** Identifier for the search. */ id: Id + /** If true, the response does not contain complete search results. This could be because either the search is still running (is_running status is true), or because it is already completed (is_running status is false) and results are partial due to failures or timeouts. */ is_partial: boolean + /** If true, the search request is still executing. If false, the search is completed. */ is_running: boolean + /** For a running search, shows a timestamp when the eql search started, in milliseconds since the Unix epoch. */ start_time_in_millis?: EpochTime + /** Shows a timestamp when the eql search will expire, in milliseconds since the Unix epoch. When this time is reached, the search and its results are deleted, even if the search is still ongoing. */ expiration_time_in_millis?: EpochTime + /** For a completed search, shows the HTTP status code of the completed search. */ completion_status?: integer } export interface EqlSearchRequest extends RequestBase { -/** The name of the index to scope the operation */ + /** The name of the index to scope the operation */ index: Indices allow_no_indices?: boolean expand_wildcards?: ExpandWildcards @@ -11887,9 +17233,13 @@ export interface EqlSearchRequest extends RequestBase { keep_alive?: Duration keep_on_completion?: boolean wait_for_completion_timeout?: Duration - /** Allow query execution also in case of shard failures.
+ * If true, the query will keep running and will return results based on the available shards. + * For sequences, the behavior can be further refined using allow_partial_sequence_results */ allow_partial_search_results?: boolean - /** This flag applies only to sequences and has effect only if allow_partial_search_results=true. If true, the sequence query will return results based on the available shards, ignoring the others. If false, the sequence query will return successfully, but will always have empty results. */ + /** This flag applies only to sequences and has effect only if allow_partial_search_results=true. + * If true, the sequence query will return results based on the available shards, ignoring the others. + * If false, the sequence query will return successfully, but will always have empty results. */ allow_partial_sequence_results?: boolean /** For basic queries, the maximum number of matching events to return. Defaults to 10 */ size?: uint @@ -11897,7 +17247,9 @@ export interface EqlSearchRequest extends RequestBase { fields?: QueryDslFieldAndFormat | Field | (QueryDslFieldAndFormat | Field)[] result_position?: EqlSearchResultPosition runtime_mappings?: MappingRuntimeFields - /** By default, the response of a sample query contains up to `10` samples, with one sample per unique set of join keys. Use the `size` parameter to get a smaller or larger set of samples. To retrieve more than one sample per set of join keys, use the `max_samples_per_key` parameter. Pipes are not supported for sample queries. */ + /** By default, the response of a sample query contains up to `10` samples, with one sample per unique set of join keys. Use the `size` + * parameter to get a smaller or larger set of samples. To retrieve more than one sample per set of join keys, use the + * `max_samples_per_key` parameter. Pipes are not supported for sample queries. */ max_samples_per_key?: integer /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { index?: never, allow_no_indices?: never, expand_wildcards?: never, ignore_unavailable?: never, query?: never, case_sensitive?: never, event_category_field?: never, tiebreaker_field?: never, timestamp_field?: never, fetch_size?: never, filter?: never, keep_alive?: never, keep_on_completion?: never, wait_for_completion_timeout?: never, allow_partial_search_results?: never, allow_partial_sequence_results?: never, size?: never, fields?: never, result_position?: never, runtime_mappings?: never, max_samples_per_key?: never } @@ -11909,36 +17261,98 @@ export type EqlSearchResponse = EqlEqlSearchResponseBase + _shards?: EsqlEsqlShardInfo +} -export type EsqlTableValuesKeywordValue = string | string[] +export interface EsqlEsqlClusterInfo { + total: integer + successful: integer + running: integer + skipped: integer + partial: integer + failed: integer + details: Record +} + +export type EsqlEsqlClusterStatus = 'running' | 'successful' | 'partial' | 'skipped' | 'failed' + +export interface EsqlEsqlColumnInfo { + name: string + type: string +} + +export type EsqlEsqlFormat = 'csv' | 'json' | 'tsv' | 'txt' | 'yaml' | 'cbor' | 'smile' | 'arrow' + +export interface EsqlEsqlResult { + took?: DurationValue + is_partial?: boolean + all_columns?: EsqlEsqlColumnInfo[] + columns: EsqlEsqlColumnInfo[] + values: FieldValue[][] + /** Cross-cluster search information. Present if `include_ccs_metadata` was `true` in the request + * and a cross-cluster search was performed. 
*/ + _clusters?: EsqlEsqlClusterInfo + /** Profiling information. Present if `profile` was `true` in the request. + * The contents of this field are currently unstable. */ + profile?: any +} + +export interface EsqlEsqlShardFailure { + shard: Id + index: IndexName + node?: NodeId + reason: ErrorCause +} + +export interface EsqlEsqlShardInfo { + total: integer + successful?: integer + skipped?: integer + failed?: integer + failures?: EsqlEsqlShardFailure[] +} + +export interface EsqlTableValuesContainer { + integer?: EsqlTableValuesIntegerValue[] + keyword?: EsqlTableValuesKeywordValue[] + long?: EsqlTableValuesLongValue[] + double?: EsqlTableValuesLongDouble[] +} + +export type EsqlTableValuesIntegerValue = integer | integer[] + +export type EsqlTableValuesKeywordValue = string | string[] export type EsqlTableValuesLongDouble = double | double[] export type EsqlTableValuesLongValue = long | long[] export interface EsqlAsyncQueryRequest extends RequestBase { -/** The character to use between values within a CSV row. It is valid only for the CSV format. */ + /** The character to use between values within a CSV row. + * It is valid only for the CSV format. */ delimiter?: string - /** Indicates whether columns that are entirely `null` will be removed from the `columns` and `values` portion of the results. If `true`, the response will include an extra section under the name `all_columns` which has the name of all the columns. */ + /** Indicates whether columns that are entirely `null` will be removed from the `columns` and `values` portion of the results. + * If `true`, the response will include an extra section under the name `all_columns` which has the name of all the columns. */ drop_null_columns?: boolean /** A short version of the Accept header, for example `json` or `yaml`. */ format?: EsqlEsqlFormat - /** The period for which the query and its results are stored in the cluster. The default period is five days. When this period expires, the query and its results are deleted, even if the query is still ongoing. If the `keep_on_completion` parameter is false, Elasticsearch only stores async queries that do not complete within the period set by the `wait_for_completion_timeout` parameter, regardless of this value. */ + /** The period for which the query and its results are stored in the cluster. + * The default period is five days. + * When this period expires, the query and its results are deleted, even if the query is still ongoing. + * If the `keep_on_completion` parameter is false, Elasticsearch only stores async queries that do not complete within the period set by the `wait_for_completion_timeout` parameter, regardless of this value. */ keep_alive?: Duration - /** Indicates whether the query and its results are stored in the cluster. If false, the query and its results are stored in the cluster only if the request does not complete during the period set by the `wait_for_completion_timeout` parameter. */ + /** Indicates whether the query and its results are stored in the cluster. + * If false, the query and its results are stored in the cluster only if the request does not complete during the period set by the `wait_for_completion_timeout` parameter. */ keep_on_completion?: boolean - /** The period to wait for the request to finish. By default, the request waits for 1 second for the query results. If the query completes during this period, results are returned Otherwise, a query ID is returned that can later be used to retrieve the results. 
*/ - wait_for_completion_timeout?: Duration /** By default, ES|QL returns results as rows. For example, FROM returns each individual document as one row. For the JSON, YAML, CBOR and smile formats, ES|QL can return the results in a columnar fashion where one row represents all the values of a certain column in the results. */ columnar?: boolean /** Specify a Query DSL query in the filter parameter to filter the set of documents that an ES|QL query runs on. */ @@ -11946,24 +17360,37 @@ export interface EsqlAsyncQueryRequest extends RequestBase { locale?: string /** To avoid any attempts of hacking or code injection, extract the values in a separate list of parameters. Use question mark placeholders (?) in the query string for each of the parameters. */ params?: FieldValue[] - /** If provided and `true` the response will include an extra `profile` object with information on how the query was executed. This information is for human debugging and its format can change at any time but it can give some insight into the performance of each part of the query. */ + /** If provided and `true` the response will include an extra `profile` object + * with information on how the query was executed. This information is for human debugging + * and its format can change at any time but it can give some insight into the performance + * of each part of the query. */ profile?: boolean /** The ES|QL query API accepts an ES|QL query string in the query parameter, runs it, and returns the results. */ query: string - /** Tables to use with the LOOKUP operation. The top level key is the table name and the next level key is the column name. */ + /** Tables to use with the LOOKUP operation. The top level key is the table + * name and the next level key is the column name. */ tables?: Record> - /** When set to `true` and performing a cross-cluster query, the response will include an extra `_clusters` object with information about the clusters that participated in the search along with info such as shards count. */ + /** When set to `true` and performing a cross-cluster query, the response will include an extra `_clusters` + * object with information about the clusters that participated in the search along with info such as shards + * count. */ include_ccs_metadata?: boolean + /** The period to wait for the request to finish. + * By default, the request waits for 1 second for the query results. + * If the query completes during this period, results are returned. + * Otherwise, a query ID is returned that can later be used to retrieve the results. */ + wait_for_completion_timeout?: Duration /** All values in `body` will be added to the request body. */ - body?: string | { [key: string]: any } & { delimiter?: never, drop_null_columns?: never, format?: never, keep_alive?: never, keep_on_completion?: never, wait_for_completion_timeout?: never, columnar?: never, filter?: never, locale?: never, params?: never, profile?: never, query?: never, tables?: never, include_ccs_metadata?: never } + body?: string | { [key: string]: any } & { delimiter?: never, drop_null_columns?: never, format?: never, keep_alive?: never, keep_on_completion?: never, columnar?: never, filter?: never, locale?: never, params?: never, profile?: never, query?: never, tables?: never, include_ccs_metadata?: never, wait_for_completion_timeout?: never } /** All values in `querystring` will be added to the request querystring.
*/ - querystring?: { [key: string]: any } & { delimiter?: never, drop_null_columns?: never, format?: never, keep_alive?: never, keep_on_completion?: never, wait_for_completion_timeout?: never, columnar?: never, filter?: never, locale?: never, params?: never, profile?: never, query?: never, tables?: never, include_ccs_metadata?: never } + querystring?: { [key: string]: any } & { delimiter?: never, drop_null_columns?: never, format?: never, keep_alive?: never, keep_on_completion?: never, columnar?: never, filter?: never, locale?: never, params?: never, profile?: never, query?: never, tables?: never, include_ccs_metadata?: never, wait_for_completion_timeout?: never } } -export type EsqlAsyncQueryResponse = EsqlResult +export type EsqlAsyncQueryResponse = EsqlAsyncEsqlResult export interface EsqlAsyncQueryDeleteRequest extends RequestBase { -/** The unique identifier of the query. A query ID is provided in the ES|QL async query API response for a query that does not complete in the designated time. A query ID is also provided when the request was submitted with the `keep_on_completion` parameter set to `true`. */ + /** The unique identifier of the query. + * A query ID is provided in the ES|QL async query API response for a query that does not complete in the designated time. + * A query ID is also provided when the request was submitted with the `keep_on_completion` parameter set to `true`. */ id: Id /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { id?: never } @@ -11974,13 +17401,20 @@ export interface EsqlAsyncQueryDeleteRequest extends RequestBase { export type EsqlAsyncQueryDeleteResponse = AcknowledgedResponseBase export interface EsqlAsyncQueryGetRequest extends RequestBase { -/** The unique identifier of the query. A query ID is provided in the ES|QL async query API response for a query that does not complete in the designated time. A query ID is also provided when the request was submitted with the `keep_on_completion` parameter set to `true`. */ + /** The unique identifier of the query. + * A query ID is provided in the ES|QL async query API response for a query that does not complete in the designated time. + * A query ID is also provided when the request was submitted with the `keep_on_completion` parameter set to `true`. */ id: Id - /** Indicates whether columns that are entirely `null` will be removed from the `columns` and `values` portion of the results. If `true`, the response will include an extra section under the name `all_columns` which has the name of all the columns. */ + /** Indicates whether columns that are entirely `null` will be removed from the `columns` and `values` portion of the results. + * If `true`, the response will include an extra section under the name `all_columns` which has the name of all the columns. */ drop_null_columns?: boolean - /** The period for which the query and its results are stored in the cluster. When this period expires, the query and its results are deleted, even if the query is still ongoing. */ + /** The period for which the query and its results are stored in the cluster. + * When this period expires, the query and its results are deleted, even if the query is still ongoing. */ keep_alive?: Duration - /** The period to wait for the request to finish. By default, the request waits for complete query results. If the request completes during the period specified in this parameter, complete query results are returned. 
Otherwise, the response returns an `is_running` value of `true` and no results. */ + /** The period to wait for the request to finish. + * By default, the request waits for complete query results. + * If the request completes during the period specified in this parameter, complete query results are returned. + * Otherwise, the response returns an `is_running` value of `true` and no results. */ wait_for_completion_timeout?: Duration /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { id?: never, drop_null_columns?: never, keep_alive?: never, wait_for_completion_timeout?: never } @@ -11988,12 +17422,15 @@ export interface EsqlAsyncQueryGetRequest extends RequestBase { querystring?: { [key: string]: any } & { id?: never, drop_null_columns?: never, keep_alive?: never, wait_for_completion_timeout?: never } } -export type EsqlAsyncQueryGetResponse = EsqlResult +export type EsqlAsyncQueryGetResponse = EsqlAsyncEsqlResult export interface EsqlAsyncQueryStopRequest extends RequestBase { -/** The unique identifier of the query. A query ID is provided in the ES|QL async query API response for a query that does not complete in the designated time. A query ID is also provided when the request was submitted with the `keep_on_completion` parameter set to `true`. */ + /** The unique identifier of the query. + * A query ID is provided in the ES|QL async query API response for a query that does not complete in the designated time. + * A query ID is also provided when the request was submitted with the `keep_on_completion` parameter set to `true`. */ id: Id - /** Indicates whether columns that are entirely `null` will be removed from the `columns` and `values` portion of the results. If `true`, the response will include an extra section under the name `all_columns` which has the name of all the columns. */ + /** Indicates whether columns that are entirely `null` will be removed from the `columns` and `values` portion of the results. + * If `true`, the response will include an extra section under the name `all_columns` which has the name of all the columns. */ drop_null_columns?: boolean /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { id?: never, drop_null_columns?: never } @@ -12001,14 +17438,15 @@ export interface EsqlAsyncQueryStopRequest extends RequestBase { querystring?: { [key: string]: any } & { id?: never, drop_null_columns?: never } } -export type EsqlAsyncQueryStopResponse = EsqlResult +export type EsqlAsyncQueryStopResponse = EsqlEsqlResult export interface EsqlQueryRequest extends RequestBase { -/** A short version of the Accept header, e.g. json, yaml. */ + /** A short version of the Accept header, e.g. json, yaml. */ format?: EsqlEsqlFormat /** The character to use between values within a CSV row. Only valid for the CSV format. */ delimiter?: string - /** Should columns that are entirely `null` be removed from the `columns` and `values` portion of the results? Defaults to `false`. If `true` then the response will include an extra section under the name `all_columns` which has the name of all columns. */ + /** Should columns that are entirely `null` be removed from the `columns` and `values` portion of the results? + * Defaults to `false`. If `true` then the response will include an extra section under the name `all_columns` which has the name of all columns. */ drop_null_columns?: boolean /** By default, ES|QL returns results as rows. 
For example, FROM returns each individual document as one row. For the JSON, YAML, CBOR and smile formats, ES|QL can return the results in a columnar fashion where one row represents all the values of a certain column in the results. */ columnar?: boolean @@ -12017,13 +17455,19 @@ export interface EsqlQueryRequest extends RequestBase { locale?: string /** To avoid any attempts of hacking or code injection, extract the values in a separate list of parameters. Use question mark placeholders (?) in the query string for each of the parameters. */ params?: FieldValue[] - /** If provided and `true` the response will include an extra `profile` object with information on how the query was executed. This information is for human debugging and its format can change at any time but it can give some insight into the performance of each part of the query. */ + /** If provided and `true` the response will include an extra `profile` object + * with information on how the query was executed. This information is for human debugging + * and its format can change at any time but it can give some insight into the performance + * of each part of the query. */ profile?: boolean /** The ES|QL query API accepts an ES|QL query string in the query parameter, runs it, and returns the results. */ query: string - /** Tables to use with the LOOKUP operation. The top level key is the table name and the next level key is the column name. */ + /** Tables to use with the LOOKUP operation. The top level key is the table + * name and the next level key is the column name. */ tables?: Record> - /** When set to `true` and performing a cross-cluster query, the response will include an extra `_clusters` object with information about the clusters that participated in the search along with info such as shards count. */ + /** When set to `true` and performing a cross-cluster query, the response will include an extra `_clusters` + * object with information about the clusters that participated in the search along with info such as shards + * count. */ include_ccs_metadata?: boolean /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { format?: never, delimiter?: never, drop_null_columns?: never, columnar?: never, filter?: never, locale?: never, params?: never, profile?: never, query?: never, tables?: never, include_ccs_metadata?: never } @@ -12031,7 +17475,7 @@ export interface EsqlQueryRequest extends RequestBase { querystring?: { [key: string]: any } & { format?: never, delimiter?: never, drop_null_columns?: never, columnar?: never, filter?: never, locale?: never, params?: never, profile?: never, query?: never, tables?: never, include_ccs_metadata?: never } } -export type EsqlQueryResponse = EsqlResult +export type EsqlQueryResponse = EsqlEsqlResult export interface FeaturesFeature { name: string @@ -12039,7 +17483,7 @@ export interface FeaturesFeature { } export interface FeaturesGetFeaturesRequest extends RequestBase { -/** Period to wait for a connection to the master node. */ + /** Period to wait for a connection to the master node. */ master_timeout?: Duration /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { master_timeout?: never } @@ -12052,7 +17496,7 @@ export interface FeaturesGetFeaturesResponse { } export interface FeaturesResetFeaturesRequest extends RequestBase { -/** Period to wait for a connection to the master node. */ + /** Period to wait for a connection to the master node. 
*/ master_timeout?: Duration /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { master_timeout?: never } @@ -12067,13 +17511,17 @@ export interface FeaturesResetFeaturesResponse { export type FleetCheckpoint = long export interface FleetGlobalCheckpointsRequest extends RequestBase { -/** A single index or index alias that resolves to a single index. */ + /** A single index or index alias that resolves to a single index. */ index: IndexName | IndexAlias - /** A boolean value which controls whether to wait (until the timeout) for the global checkpoints to advance past the provided `checkpoints`. */ + /** A boolean value which controls whether to wait (until the timeout) for the global checkpoints + * to advance past the provided `checkpoints`. */ wait_for_advance?: boolean - /** A boolean value which controls whether to wait (until the timeout) for the target index to exist and all primary shards be active. Can only be true when `wait_for_advance` is true. */ + /** A boolean value which controls whether to wait (until the timeout) for the target index to exist + * and all primary shards be active. Can only be true when `wait_for_advance` is true. */ wait_for_index?: boolean - /** A comma separated list of previous global checkpoints. When used in combination with `wait_for_advance`, the API will only return once the global checkpoints advances past the checkpoints. Providing an empty list will cause Elasticsearch to immediately return the current global checkpoints. */ + /** A comma separated list of previous global checkpoints. When used in combination with `wait_for_advance`, + * the API will only return once the global checkpoints advances past the checkpoints. Providing an empty list + * will cause Elasticsearch to immediately return the current global checkpoints. */ checkpoints?: FleetCheckpoint[] /** Period to wait for a global checkpoints to advance past `checkpoints`. */ timeout?: Duration @@ -12089,7 +17537,7 @@ export interface FleetGlobalCheckpointsResponse { } export interface FleetMsearchRequest extends RequestBase { -/** A single target to search. If the target is an index alias, it must resolve to a single index. */ + /** A single target to search. If the target is an index alias, it must resolve to a single index. */ index?: IndexName | IndexAlias /** If false, the request returns an error if any wildcard expression, index alias, or _all value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting foo*,bar* returns an error if an index starts with foo but no index starts with bar. */ allow_no_indices?: boolean @@ -12102,9 +17550,9 @@ export interface FleetMsearchRequest extends RequestBase { /** If true, missing or closed indices are not included in the response. */ ignore_unavailable?: boolean /** Maximum number of concurrent searches the multi search API can execute. */ - max_concurrent_searches?: long + max_concurrent_searches?: integer /** Maximum number of concurrent shard requests that each sub-search request executes per node. */ - max_concurrent_shard_requests?: long + max_concurrent_shard_requests?: integer /** Defines a threshold that enforces a pre-filter roundtrip to prefilter search shards based on query rewriting if the number of shards the search request expands to exceeds the threshold. 
This filter roundtrip can limit the number of shards significantly if for instance a shard can not match any documents based on its rewrite method i.e., if date filters are mandatory to match but the shard bounds and the query are disjoint. */ pre_filter_shard_size?: long /** Indicates whether global term and document frequencies should be used when scoring returned documents. */ @@ -12113,9 +17561,13 @@ export interface FleetMsearchRequest extends RequestBase { rest_total_hits_as_int?: boolean /** Specifies whether aggregation and suggester names should be prefixed by their respective types in the response. */ typed_keys?: boolean - /** A comma separated list of checkpoints. When configured, the search API will only be executed on a shard after the relevant checkpoint has become visible for search. Defaults to an empty list which will cause Elasticsearch to immediately execute the search. */ + /** A comma separated list of checkpoints. When configured, the search API will only be executed on a shard + * after the relevant checkpoint has become visible for search. Defaults to an empty list which will cause + * Elasticsearch to immediately execute the search. */ wait_for_checkpoints?: FleetCheckpoint[] - /** If true, returns partial results if there are shard request timeouts or [shard failures](https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-replication.html#shard-failures). If false, returns an error with no partial results. Defaults to the configured cluster setting `search.default_allow_partial_results` which is true by default. */ + /** If true, returns partial results if there are shard request timeouts or shard failures. + * If false, returns an error with no partial results. + * Defaults to the configured cluster setting `search.default_allow_partial_results`, which is true by default. */ allow_partial_search_results?: boolean searches?: MsearchRequestItem[] /** All values in `body` will be added to the request body. */ @@ -12129,7 +17581,7 @@ export interface FleetMsearchResponse { } export interface FleetSearchRequest extends RequestBase { -/** A single target to search. If the target is an index alias, it must resolve to a single index. */ + /** A single target to search. If the target is an index alias, it must resolve to a single index. */ index: IndexName | IndexAlias allow_no_indices?: boolean analyzer?: string @@ -12142,7 +17594,7 @@ export interface FleetSearchRequest extends RequestBase { ignore_throttled?: boolean ignore_unavailable?: boolean lenient?: boolean - max_concurrent_shard_requests?: long + max_concurrent_shard_requests?: integer preference?: string pre_filter_shard_size?: long request_cache?: boolean @@ -12160,9 +17612,13 @@ export interface FleetSearchRequest extends RequestBase { _source_excludes?: Fields _source_includes?: Fields q?: string - /** A comma separated list of checkpoints. When configured, the search API will only be executed on a shard after the relevant checkpoint has become visible for search. Defaults to an empty list which will cause Elasticsearch to immediately execute the search. */ + /** A comma separated list of checkpoints. When configured, the search API will only be executed on a shard + * after the relevant checkpoint has become visible for search. Defaults to an empty list which will cause + * Elasticsearch to immediately execute the search. 
*/ wait_for_checkpoints?: FleetCheckpoint[] - /** If true, returns partial results if there are shard request timeouts or [shard failures](https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-replication.html#shard-failures). If false, returns an error with no partial results. Defaults to the configured cluster setting `search.default_allow_partial_results` which is true by default. */ + /** If true, returns partial results if there are shard request timeouts or shard failures. + * If false, returns an error with no partial results. + * Defaults to the configured cluster setting `search.default_allow_partial_results`, which is true by default. */ allow_partial_search_results?: boolean aggregations?: Record /** @alias aggregations */ @@ -12172,16 +17628,23 @@ export interface FleetSearchRequest extends RequestBase { explain?: boolean /** Configuration of search extensions defined by Elasticsearch plugins. */ ext?: Record - /** Starting document offset. By default, you cannot page through more than 10,000 hits using the from and size parameters. To page through more hits, use the search_after parameter. */ + /** Starting document offset. By default, you cannot page through more than 10,000 + * hits using the from and size parameters. To page through more hits, use the + * search_after parameter. */ from?: integer highlight?: SearchHighlight - /** Number of hits matching the query to count accurately. If true, the exact number of hits is returned at the cost of some performance. If false, the response does not include the total number of hits matching the query. Defaults to 10,000 hits. */ + /** Number of hits matching the query to count accurately. If true, the exact + * number of hits is returned at the cost of some performance. If false, the + * response does not include the total number of hits matching the query. + * Defaults to 10,000 hits. */ track_total_hits?: SearchTrackHits /** Boosts the _score of documents from specified indices. */ - indices_boost?: Record[] - /** Array of wildcard (*) patterns. The request returns doc values for field names matching these patterns in the hits.fields property of the response. */ + indices_boost?: Partial>[] + /** Array of wildcard (*) patterns. The request returns doc values for field + * names matching these patterns in the hits.fields property of the response. */ docvalue_fields?: (QueryDslFieldAndFormat | Field)[] - /** Minimum _score for matching documents. Documents with a lower _score are not included in the search results. */ + /** Minimum _score for matching documents. Documents with a lower _score are + * not included in search results and results collected by aggregations. */ min_score?: double post_filter?: QueryDslQueryContainer profile?: boolean @@ -12191,32 +17654,48 @@ export interface FleetSearchRequest extends RequestBase { /** Retrieve a script evaluation (based on different fields) for each hit. */ script_fields?: Record search_after?: SortResults - /** The number of hits to return. By default, you cannot page through more than 10,000 hits using the from and size parameters. To page through more hits, use the search_after parameter. */ + /** The number of hits to return. By default, you cannot page through more + * than 10,000 hits using the from and size parameters. To page through more + * hits, use the search_after parameter. */ size?: integer slice?: SlicedScroll sort?: Sort - /** Indicates which source fields are returned for matching documents. 
These fields are returned in the hits._source property of the search response. */ + /** Indicates which source fields are returned for matching documents. These + * fields are returned in the hits._source property of the search response. */ _source?: SearchSourceConfig - /** Array of wildcard (*) patterns. The request returns values for field names matching these patterns in the hits.fields property of the response. */ + /** Array of wildcard (*) patterns. The request returns values for field names + * matching these patterns in the hits.fields property of the response. */ fields?: (QueryDslFieldAndFormat | Field)[] suggest?: SearchSuggester - /** Maximum number of documents to collect for each shard. If a query reaches this limit, Elasticsearch terminates the query early. Elasticsearch collects documents before sorting. Defaults to 0, which does not terminate query execution early. */ + /** Maximum number of documents to collect for each shard. If a query reaches this + * limit, Elasticsearch terminates the query early. Elasticsearch collects documents + * before sorting. Defaults to 0, which does not terminate query execution early. */ terminate_after?: long - /** Specifies the period of time to wait for a response from each shard. If no response is received before the timeout expires, the request fails and returns an error. Defaults to no timeout. */ + /** Specifies the period of time to wait for a response from each shard. If no response + * is received before the timeout expires, the request fails and returns an error. + * Defaults to no timeout. */ timeout?: string /** If true, calculate and return document scores, even if the scores are not used for sorting. */ track_scores?: boolean /** If true, returns document version as part of a hit. */ version?: boolean - /** If true, returns sequence number and primary term of the last modification of each hit. See Optimistic concurrency control. */ + /** If true, returns sequence number and primary term of the last modification + * of each hit. See Optimistic concurrency control. */ seq_no_primary_term?: boolean - /** List of stored fields to return as part of a hit. If no fields are specified, no stored fields are included in the response. If this field is specified, the _source parameter defaults to false. You can pass _source: true to return both source fields and stored fields in the search response. */ + /** List of stored fields to return as part of a hit. If no fields are specified, + * no stored fields are included in the response. If this field is specified, the _source + * parameter defaults to false. You can pass _source: true to return both source fields + * and stored fields in the search response. */ stored_fields?: Fields - /** Limits the search to a point in time (PIT). If you provide a PIT, you cannot specify an in the request path. */ + /** Limits the search to a point in time (PIT). If you provide a PIT, you + * cannot specify an in the request path. */ pit?: SearchPointInTimeReference - /** Defines one or more runtime fields in the search request. These fields take precedence over mapped fields with the same name. */ + /** Defines one or more runtime fields in the search request. These fields take + * precedence over mapped fields with the same name. */ runtime_mappings?: MappingRuntimeFields - /** Stats groups to associate with the search. Each group maintains a statistics aggregation for its associated searches. You can retrieve these stats using the indices stats API. */ + /** Stats groups to associate with the search. 
Each group maintains a statistics + * aggregation for its associated searches. You can retrieve these stats using + * the indices stats API. */ stats?: string[] /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { index?: never, allow_no_indices?: never, analyzer?: never, analyze_wildcard?: never, batched_reduce_size?: never, ccs_minimize_roundtrips?: never, default_operator?: never, df?: never, expand_wildcards?: never, ignore_throttled?: never, ignore_unavailable?: never, lenient?: never, max_concurrent_shard_requests?: never, preference?: never, pre_filter_shard_size?: never, request_cache?: never, routing?: never, scroll?: never, search_type?: never, suggest_field?: never, suggest_mode?: never, suggest_size?: never, suggest_text?: never, typed_keys?: never, rest_total_hits_as_int?: never, _source_excludes?: never, _source_includes?: never, q?: never, wait_for_checkpoints?: never, allow_partial_search_results?: never, aggregations?: never, aggs?: never, collapse?: never, explain?: never, ext?: never, from?: never, highlight?: never, track_total_hits?: never, indices_boost?: never, docvalue_fields?: never, min_score?: never, post_filter?: never, profile?: never, query?: never, rescore?: never, script_fields?: never, search_after?: never, size?: never, slice?: never, sort?: never, _source?: never, fields?: never, suggest?: never, terminate_after?: never, timeout?: never, track_scores?: never, version?: never, seq_no_primary_term?: never, stored_fields?: never, pit?: never, runtime_mappings?: never, stats?: never } @@ -12249,15 +17728,28 @@ export interface GraphConnection { } export interface GraphExploreControls { + /** To avoid the top-matching documents sample being dominated by a single source of results, it is sometimes necessary to request diversity in the sample. + * You can do this by selecting a single-value field and setting a maximum number of documents per value for that field. */ sample_diversity?: GraphSampleDiversity + /** Each hop considers a sample of the best-matching documents on each shard. + * Using samples improves the speed of execution and keeps exploration focused on meaningfully-connected terms. + * Very small values (less than 50) might not provide sufficient weight-of-evidence to identify significant connections between terms. + * Very large sample sizes can dilute the quality of the results and increase execution times. */ sample_size?: integer + /** The length of time in milliseconds after which exploration will be halted and the results gathered so far are returned. + * This timeout is honored on a best-effort basis. + * Execution might overrun this timeout if, for example, a long pause is encountered while FieldData is loaded for a field. */ timeout?: Duration + /** Filters associated terms so only those that are significantly associated with your query are included. */ use_significance: boolean } export interface GraphHop { + /** Specifies one or more fields from which you want to extract terms that are associated with the specified vertices. */ connections?: GraphHop + /** An optional guiding query that constrains the Graph API as it explores connected terms. */ query?: QueryDslQueryContainer + /** Contains the fields you are interested in. */ vertices: GraphVertexDefinition[] } @@ -12274,11 +17766,18 @@ export interface GraphVertex { } export interface GraphVertexDefinition { + /** Prevents the specified terms from being included in the results. 
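// Editor's illustrative sketch (not part of the generated diff): a minimal call that
// exercises the FleetSearchRequest shape above. The index name and checkpoint value are
// hypothetical, and Fleet APIs are normally driven by Fleet server itself.
// Assumes an async/ESM context.
import { Client } from '@elastic/elasticsearch'
const client = new Client({ node: 'http://localhost:9200/' })

const fleetResp = await client.fleet.search({
  index: '.fleet-agents',            // must resolve to a single index
  wait_for_checkpoints: [42],        // only search once this checkpoint is visible
  allow_partial_search_results: false,
  query: { match_all: {} },
  size: 10
})
console.log(fleetResp.hits.hits)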
*/ exclude?: string[] + /** Identifies a field in the documents of interest. */ field: Field + /** Identifies the terms of interest that form the starting points from which you want to spider out. */ include?: (GraphVertexInclude | string)[] + /** Specifies how many documents must contain a pair of terms before it is considered to be a useful connection. + * This setting acts as a certainty threshold. */ min_doc_count?: long + /** Controls how many documents on a particular shard have to contain a pair of terms before the connection is returned for global consideration. */ shard_min_doc_count?: long + /** Specifies the maximum number of vertex terms returned for each field. */ size?: integer } @@ -12288,11 +17787,13 @@ export interface GraphVertexInclude { } export interface GraphExploreRequest extends RequestBase { -/** Name of the index. */ + /** Name of the index. */ index: Indices /** Custom value used to route operations to a specific shard. */ routing?: Routing - /** Specifies the period of time to wait for a response from each shard. If no response is received before the timeout expires, the request fails and returns an error. Defaults to no timeout. */ + /** Specifies the period of time to wait for a response from each shard. + * If no response is received before the timeout expires, the request fails and returns an error. + * Defaults to no timeout. */ timeout?: Duration /** Specifies or more fields from which you want to extract terms that are associated with the specified vertices. */ connections?: GraphHop @@ -12317,18 +17818,31 @@ export interface GraphExploreResponse { } export interface IlmActions { + /** Phases allowed: warm, cold. */ allocate?: IlmAllocateAction + /** Phases allowed: delete. */ delete?: IlmDeleteAction + /** Phases allowed: hot, warm, cold. */ downsample?: IlmDownsampleAction + /** The freeze action is a noop in 8.x */ freeze?: EmptyObject + /** Phases allowed: hot, warm. */ forcemerge?: IlmForceMergeAction + /** Phases allowed: warm, cold. */ migrate?: IlmMigrateAction + /** Phases allowed: hot, warm, cold. */ readonly?: EmptyObject + /** Phases allowed: hot. */ rollover?: IlmRolloverAction + /** Phases allowed: hot, warm, cold. */ set_priority?: IlmSetPriorityAction + /** Phases allowed: hot, cold, frozen. */ searchable_snapshot?: IlmSearchableSnapshotAction + /** Phases allowed: hot, warm. */ shrink?: IlmShrinkAction + /** Phases allowed: hot, warm, cold, frozen. */ unfollow?: EmptyObject + /** Phases allowed: delete. */ wait_for_snapshot?: IlmWaitForSnapshotAction } @@ -12373,6 +17887,7 @@ export interface IlmPhases { export interface IlmPolicy { phases: IlmPhases + /** Arbitrary metadata that is not automatically generated or used by Elasticsearch. */ _meta?: Metadata } @@ -12409,7 +17924,7 @@ export interface IlmWaitForSnapshotAction { } export interface IlmDeleteLifecycleRequest extends RequestBase { -/** Identifier for the policy. */ + /** Identifier for the policy. */ name: Name /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration @@ -12468,7 +17983,8 @@ export interface IlmExplainLifecycleLifecycleExplainUnmanaged { } export interface IlmExplainLifecycleRequest extends RequestBase { -/** Comma-separated list of data streams, indices, and aliases to target. Supports wildcards (`*`). To target all data streams and indices, use `*` or `_all`. */ + /** Comma-separated list of data streams, indices, and aliases to target. 
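// Editor's illustrative sketch (not part of the generated diff): how the GraphExploreRequest
// above might be called. Index, field, and query values are hypothetical; `client` is the
// @elastic/elasticsearch Client created in the earlier example, and `controls` is assumed to
// follow the GraphExploreControls shape shown above.
const graphResp = await client.graph.explore({
  index: 'clicklogs',
  query: { match: { 'query.raw': 'midi' } },
  vertices: [{ field: 'product', min_doc_count: 3 }],
  controls: { use_significance: true, sample_size: 2000 }
})
console.log(graphResp.vertices, graphResp.connections)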
Supports wildcards (`*`). + * To target all data streams and indices, use `*` or `_all`. */ index: IndexName /** Filters the returned indices to only indices that are managed by ILM and are in an error state, either due to an encountering an error while executing the policy, or attempting to use a policy that does not exist. */ only_errors?: boolean @@ -12493,7 +18009,7 @@ export interface IlmGetLifecycleLifecycle { } export interface IlmGetLifecycleRequest extends RequestBase { -/** Identifier for the policy. */ + /** Identifier for the policy. */ name?: Name /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration @@ -12519,9 +18035,12 @@ export interface IlmGetStatusResponse { } export interface IlmMigrateToDataTiersRequest extends RequestBase { -/** If true, simulates the migration from node attributes based allocation filters to data tiers, but does not perform the migration. This provides a way to retrieve the indices and ILM policies that need to be migrated. */ + /** If true, simulates the migration from node attributes based allocation filters to data tiers, but does not perform the migration. + * This provides a way to retrieve the indices and ILM policies that need to be migrated. */ dry_run?: boolean - /** The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. It can also be set to `-1` to indicate that the request should never timeout. */ + /** The period to wait for a connection to the master node. + * If no response is received before the timeout expires, the request fails and returns an error. + * It can also be set to `-1` to indicate that the request should never timeout. */ master_timeout?: Duration legacy_template_to_delete?: string node_attribute?: string @@ -12533,16 +18052,23 @@ export interface IlmMigrateToDataTiersRequest extends RequestBase { export interface IlmMigrateToDataTiersResponse { dry_run: boolean + /** The name of the legacy index template that was deleted. + * This information is missing if no legacy index templates were deleted. */ removed_legacy_template: string + /** The ILM policies that were updated. */ migrated_ilm_policies: string[] + /** The indices that were migrated to tier preference routing. */ migrated_indices: Indices + /** The legacy index templates that were updated to not contain custom routing settings for the provided data attribute. */ migrated_legacy_templates: string[] + /** The composable index templates that were updated to not contain custom routing settings for the provided data attribute. */ migrated_composable_templates: string[] + /** The component templates that were updated to not contain custom routing settings for the provided data attribute. */ migrated_component_templates: string[] } export interface IlmMoveToStepRequest extends RequestBase { -/** The name of the index whose lifecycle step is to change */ + /** The name of the index whose lifecycle step is to change */ index: IndexName /** The step that the index is expected to be in. */ current_step: IlmMoveToStepStepKey @@ -12557,13 +18083,15 @@ export interface IlmMoveToStepRequest extends RequestBase { export type IlmMoveToStepResponse = AcknowledgedResponseBase export interface IlmMoveToStepStepKey { + /** The optional action to which the index will be moved. */ action?: string + /** The optional step name to which the index will be moved. 
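// Editor's illustrative sketch (not part of the generated diff): checking which ILM step
// managed indices are in, using the IlmExplainLifecycleRequest shape above. The index
// pattern is hypothetical; `client` is the Client from the first example.
const explain = await client.ilm.explainLifecycle({
  index: 'logs-*',
  only_errors: true        // restrict the response to indices whose ILM step has failed
})
console.log(explain.indices)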
*/ name?: string phase: string } export interface IlmPutLifecycleRequest extends RequestBase { -/** Identifier for the policy. */ + /** Identifier for the policy. */ name: Name /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration @@ -12579,7 +18107,7 @@ export interface IlmPutLifecycleRequest extends RequestBase { export type IlmPutLifecycleResponse = AcknowledgedResponseBase export interface IlmRemovePolicyRequest extends RequestBase { -/** The name of the index to remove policy on */ + /** The name of the index to remove policy on */ index: IndexName /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { index?: never } @@ -12593,7 +18121,7 @@ export interface IlmRemovePolicyResponse { } export interface IlmRetryRequest extends RequestBase { -/** The name of the indices (comma-separated) whose failed lifecycle step is to be retry */ + /** The name of the indices (comma-separated) whose failed lifecycle step is to be retry */ index: IndexName /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { index?: never } @@ -12604,7 +18132,7 @@ export interface IlmRetryRequest extends RequestBase { export type IlmRetryResponse = AcknowledgedResponseBase export interface IlmStartRequest extends RequestBase { -/** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ + /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration /** Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration @@ -12617,7 +18145,7 @@ export interface IlmStartRequest extends RequestBase { export type IlmStartResponse = AcknowledgedResponseBase export interface IlmStopRequest extends RequestBase { -/** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ + /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration /** Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration @@ -12630,20 +18158,38 @@ export interface IlmStopRequest extends RequestBase { export type IlmStopResponse = AcknowledgedResponseBase export interface IndicesAlias { + /** Query used to limit documents the alias can access. */ filter?: QueryDslQueryContainer + /** Value used to route indexing operations to a specific shard. + * If specified, this overwrites the `routing` value for indexing operations. */ index_routing?: Routing + /** If `true`, the alias is hidden. + * All indices for the alias must have the same `is_hidden` value. */ is_hidden?: boolean + /** If `true`, the index is the write index for the alias. */ is_write_index?: boolean + /** Value used to route indexing and search operations to a specific shard. */ routing?: Routing + /** Value used to route search operations to a specific shard. + * If specified, this overwrites the `routing` value for search operations. 
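// Editor's illustrative sketch (not part of the generated diff): creating a policy with the
// IlmPolicy/IlmPhases/IlmActions shapes above. The policy name and thresholds are hypothetical.
await client.ilm.putLifecycle({
  name: 'logs-policy',
  policy: {
    _meta: { owner: 'platform-team' },   // arbitrary metadata, not used by Elasticsearch
    phases: {
      hot: { actions: { rollover: { max_age: '7d', max_primary_shard_size: '50gb' } } },
      delete: { min_age: '30d', actions: { delete: {} } }
    }
  }
})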
*/ search_routing?: Routing } export interface IndicesAliasDefinition { + /** Query used to limit documents the alias can access. */ filter?: QueryDslQueryContainer + /** Value used to route indexing operations to a specific shard. + * If specified, this overwrites the `routing` value for indexing operations. */ index_routing?: string + /** If `true`, the index is the write index for the alias. */ is_write_index?: boolean + /** Value used to route indexing and search operations to a specific shard. */ routing?: string + /** Value used to route search operations to a specific shard. + * If specified, this overwrites the `routing` value for search operations. */ search_routing?: string + /** If `true`, the alias is hidden. + * All indices for the alias must have the same `is_hidden` value. */ is_hidden?: boolean } @@ -12652,40 +18198,76 @@ export interface IndicesCacheQueries { } export interface IndicesDataStream { + /** Custom metadata for the stream, copied from the `_meta` object of the stream’s matching index template. + * If empty, the response omits this property. */ _meta?: Metadata + /** If `true`, the data stream allows custom routing on write request. */ allow_custom_routing?: boolean + /** Information about failure store backing indices */ failure_store?: IndicesFailureStore + /** Current generation for the data stream. This number acts as a cumulative count of the stream’s rollovers, starting at 1. */ generation: integer + /** If `true`, the data stream is hidden. */ hidden: boolean + /** Name of the current ILM lifecycle policy in the stream’s matching index template. + * This lifecycle policy is set in the `index.lifecycle.name` setting. + * If the template does not include a lifecycle policy, this property is not included in the response. + * NOTE: A data stream’s backing indices may be assigned different lifecycle policies. To retrieve the lifecycle policy for individual backing indices, use the get index settings API. */ ilm_policy?: Name + /** Name of the lifecycle system that'll manage the next generation of the data stream. */ next_generation_managed_by: IndicesManagedBy + /** Indicates if ILM should take precedence over DSL in case both are configured to managed this data stream. */ prefer_ilm: boolean + /** Array of objects containing information about the data stream’s backing indices. + * The last item in this array contains information about the stream’s current write index. */ indices: IndicesDataStreamIndex[] + /** Contains the configuration for the data stream lifecycle of this data stream. */ lifecycle?: IndicesDataStreamLifecycleWithRollover + /** Name of the data stream. */ name: DataStreamName + /** If `true`, the data stream is created and managed by cross-cluster replication and the local cluster can not write into this data stream or change its mappings. */ replicated?: boolean + /** If `true`, the next write to this data stream will trigger a rollover first and the document will be indexed in the new backing index. If the rollover fails the indexing request will fail too. */ rollover_on_write: boolean + /** Health status of the data stream. + * This health status is based on the state of the primary and replica shards of the stream’s backing indices. */ status: HealthStatus + /** If `true`, the data stream is created and managed by an Elastic stack component and cannot be modified through normal user interaction. */ system?: boolean + /** Name of the index template used to create the data stream’s backing indices. 
+ * The template’s index pattern must match the name of this data stream. */ template: Name + /** Information about the `@timestamp` field in the data stream. */ timestamp_field: IndicesDataStreamTimestampField } export interface IndicesDataStreamIndex { + /** Name of the backing index. */ index_name: IndexName + /** Universally unique identifier (UUID) for the index. */ index_uuid: Uuid + /** Name of the current ILM lifecycle policy configured for this backing index. */ ilm_policy?: Name + /** Name of the lifecycle system that's currently managing this backing index. */ managed_by?: IndicesManagedBy + /** Indicates if ILM should take precedence over DSL in case both are configured to manage this index. */ prefer_ilm?: boolean } export interface IndicesDataStreamLifecycle { + /** If defined, every document added to this data stream will be stored at least for this time frame. + * Any time after this duration the document could be deleted. + * When empty, every document in this data stream will be stored indefinitely. */ data_retention?: Duration + /** The downsampling configuration to execute for the managed backing index after rollover. */ downsampling?: IndicesDataStreamLifecycleDownsampling + /** If defined, it turns data stream lifecycle on/off (`true`/`false`) for this data stream. A data stream lifecycle + * that's disabled (enabled: `false`) will have no effect on the data stream. */ enabled?: boolean } export interface IndicesDataStreamLifecycleDownsampling { + /** The list of downsampling rounds to execute as part of this downsampling configuration */ rounds: IndicesDownsamplingRound[] } @@ -12703,10 +18285,14 @@ export interface IndicesDataStreamLifecycleRolloverConditions { } export interface IndicesDataStreamLifecycleWithRollover extends IndicesDataStreamLifecycle { + /** The conditions which will trigger the rollover of a backing index as configured by the cluster setting `cluster.lifecycle.default.rollover`. + * This property is an implementation detail and it will only be retrieved when the query param `include_defaults` is set to true. + * The contents of this field are subject to change. */ rollover?: IndicesDataStreamLifecycleRolloverConditions } export interface IndicesDataStreamTimestampField { + /** Name of the timestamp field for the data stream, which must be `@timestamp`. The `@timestamp` field must be included in every document indexed to the data stream. */ name: Field } @@ -12716,11 +18302,14 @@ export interface IndicesDataStreamVisibility { } export interface IndicesDownsampleConfig { + /** The interval at which to aggregate the original time series index. */ fixed_interval: DurationLarge } export interface IndicesDownsamplingRound { + /** The duration since rollover when this downsampling round should execute */ after: Duration + /** The downsample configuration to execute. */ config: IndicesDownsampleConfig } @@ -12792,7 +18381,9 @@ export interface IndicesIndexSettingsKeys { routing_path?: string | string[] soft_deletes?: IndicesSoftDeletes sort?: IndicesIndexSegmentSort + /** @remarks This property is not supported on Elastic Cloud Serverless. */ number_of_shards?: integer | string + /** @remarks This property is not supported on Elastic Cloud Serverless. 
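// Editor's illustrative sketch (not part of the generated diff): configuring the data stream
// lifecycle described by IndicesDataStreamLifecycle above. The stream name and retention are
// hypothetical, and this assumes the data stream lifecycle API (client.indices.putDataLifecycle)
// is available on the target stack.
await client.indices.putDataLifecycle({
  name: 'logs-myapp-default',
  data_retention: '30d',      // documents are kept at least this long
  enabled: true               // turn the lifecycle on for this data stream
})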
*/ number_of_replicas?: integer | string number_of_routing_shards?: integer check_on_startup?: IndicesIndexCheckOnStartup @@ -12800,7 +18391,7 @@ export interface IndicesIndexSettingsKeys { routing_partition_size?: SpecUtilsStringified load_fixed_bitset_filters_eagerly?: boolean hidden?: boolean | string - auto_expand_replicas?: string + auto_expand_replicas?: SpecUtilsWithNullValue merge?: IndicesMerge search?: IndicesSettingsSearch refresh_interval?: Duration @@ -12813,6 +18404,7 @@ export interface IndicesIndexSettingsKeys { max_shingle_diff?: integer blocks?: IndicesIndexSettingBlocks max_refresh_listeners?: integer + /** Settings to define analyzers, tokenizers, token filters and character filters. */ analyze?: IndicesSettingsAnalyze highlight?: IndicesSettingsHighlight max_terms_count?: integer @@ -12838,10 +18430,14 @@ export interface IndicesIndexSettingsKeys { settings?: IndicesIndexSettings time_series?: IndicesIndexSettingsTimeSeries queries?: IndicesQueries + /** Configure custom similarity settings to customize how search results are scored. */ similarity?: Record + /** Enable or disable dynamic mapping for an index. */ mapping?: IndicesMappingLimitSettings 'indexing.slowlog'?: IndicesIndexingSlowlogSettings + /** Configure indexing back pressure limits. */ indexing_pressure?: IndicesIndexingPressure + /** The store module allows you to control how index data is stored and accessed on disk. */ store?: IndicesStorage } export type IndicesIndexSettings = IndicesIndexSettingsKeys @@ -12856,16 +18452,33 @@ export interface IndicesIndexSettingsAnalysis { } export interface IndicesIndexSettingsLifecycle { + /** The name of the policy to use to manage the index. For information about how Elasticsearch applies policy changes, see Policy updates. */ name?: Name + /** Indicates whether or not the index has been rolled over. Automatically set to true when ILM completes the rollover action. + * You can explicitly set it to skip rollover. */ indexing_complete?: SpecUtilsStringified + /** If specified, this is the timestamp used to calculate the index age for its phase transitions. Use this setting + * if you create a new index that contains old data and want to use the original creation date to calculate the index + * age. Specified as a Unix epoch value in milliseconds. */ origination_date?: long + /** Set to true to parse the origination date from the index name. This origination date is used to calculate the index age + * for its phase transitions. The index name must match the pattern ^.*-{date_format}-\\d+, where the date_format is + * yyyy.MM.dd and the trailing digits are optional. An index that was rolled over would normally match the full format, + * for example logs-2016.10.31-000002). If the index name doesn’t match the pattern, index creation fails. */ parse_origination_date?: boolean step?: IndicesIndexSettingsLifecycleStep + /** The index alias to update when the index rolls over. Specify when using a policy that contains a rollover action. + * When the index rolls over, the alias is updated to reflect that the index is no longer the write index. For more + * information about rolling indices, see Rollover. */ rollover_alias?: string + /** Preference for the system that manages a data stream backing index (preferring ILM when both ILM and DLM are + * applicable for an index). */ prefer_ilm?: boolean | string } export interface IndicesIndexSettingsLifecycleStep { + /** Time to wait for the cluster to resolve allocation issues during an ILM shrink action. 
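// Editor's illustrative sketch (not part of the generated diff): index settings from the
// IndicesIndexSettings / IndicesIndexSettingsLifecycle shapes above, passed at creation time.
// Index, alias, and policy names are hypothetical.
await client.indices.create({
  index: 'logs-000001',
  settings: {
    number_of_shards: 1,
    number_of_replicas: 1,
    lifecycle: { name: 'logs-policy', rollover_alias: 'logs' }
  },
  aliases: { logs: { is_write_index: true } }
})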
Must be greater than 1h (1 hour). + * See Shard allocation for shrink. */ wait_time_threshold?: Duration } @@ -12878,32 +18491,63 @@ export interface IndicesIndexState { aliases?: Record mappings?: MappingTypeMapping settings?: IndicesIndexSettings + /** Default settings, included when the request's `include_default` is `true`. */ defaults?: IndicesIndexSettings data_stream?: DataStreamName + /** Data stream lifecycle applicable if this is a data stream. */ lifecycle?: IndicesDataStreamLifecycle } export interface IndicesIndexTemplate { + /** Name of the index template. */ index_patterns: Names + /** An ordered list of component template names. + * Component templates are merged in the order specified, meaning that the last component template specified has the highest precedence. */ composed_of: Name[] + /** Template to be applied. + * It may optionally include an `aliases`, `mappings`, or `settings` configuration. */ template?: IndicesIndexTemplateSummary + /** Version number used to manage index templates externally. + * This number is not automatically generated by Elasticsearch. */ version?: VersionNumber + /** Priority to determine index template precedence when a new data stream or index is created. + * The index template with the highest priority is chosen. + * If no priority is specified the template is treated as though it is of priority 0 (lowest priority). + * This number is not automatically generated by Elasticsearch. */ priority?: long + /** Optional user metadata about the index template. May have any contents. + * This map is not automatically generated by Elasticsearch. */ _meta?: Metadata allow_auto_create?: boolean + /** If this object is included, the template is used to create data streams and their backing indices. + * Supports an empty object. + * Data streams require a matching index template with a `data_stream` object. */ data_stream?: IndicesIndexTemplateDataStreamConfiguration + /** Marks this index template as deprecated. + * When creating or updating a non-deprecated index template that uses deprecated components, + * Elasticsearch will emit a deprecation warning. */ deprecated?: boolean + /** A list of component template names that are allowed to be absent. */ ignore_missing_component_templates?: Names } export interface IndicesIndexTemplateDataStreamConfiguration { + /** If true, the data stream is hidden. */ hidden?: boolean + /** If true, the data stream supports custom routing. */ allow_custom_routing?: boolean } export interface IndicesIndexTemplateSummary { + /** Aliases to add. + * If the index template includes a `data_stream` object, these are data stream aliases. + * Otherwise, these are index aliases. + * Data stream aliases ignore the `index_routing`, `routing`, and `search_routing` options. */ aliases?: Record + /** Mapping for fields in the index. + * If specified, this mapping can include field names, field data types, and mapping parameters. */ mappings?: MappingTypeMapping + /** Configuration options for the index. */ settings?: IndicesIndexSettings lifecycle?: IndicesDataStreamLifecycleWithRollover } @@ -12918,6 +18562,9 @@ export interface IndicesIndexingPressure { } export interface IndicesIndexingPressureMemory { + /** Number of outstanding bytes that may be consumed by indexing requests. When this limit is reached or exceeded, + * the node will reject new coordinating and primary operations. When replica operations consume 1.5x this limit, + * the node will reject new replica operations. Defaults to 10% of the heap. 
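// Editor's illustrative sketch (not part of the generated diff): an index template using the
// IndicesIndexTemplate / IndicesIndexTemplateSummary shapes above. Template name, pattern, and
// priority are hypothetical.
await client.indices.putIndexTemplate({
  name: 'logs-myapp-template',
  index_patterns: ['logs-myapp-*'],
  data_stream: {},                    // matching names become data streams, not plain indices
  priority: 200,
  template: {
    settings: { number_of_shards: 1 },
    mappings: { properties: { '@timestamp': { type: 'date' } } }
  }
})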
*/ limit?: integer } @@ -12929,6 +18576,8 @@ export interface IndicesIndexingSlowlogSettings { } export interface IndicesIndexingSlowlogTresholds { + /** The indexing slow log, similar in functionality to the search slow log. The log file name ends with `_index_indexing_slowlog.json`. + * Log and the thresholds are configured in the same way as the search slowlog. */ index?: IndicesSlowlogTresholdLevels } @@ -12947,22 +18596,34 @@ export interface IndicesMappingLimitSettings { } export interface IndicesMappingLimitSettingsDepth { + /** The maximum depth for a field, which is measured as the number of inner objects. For instance, if all fields are defined + * at the root object level, then the depth is 1. If there is one object mapping, then the depth is 2, etc. */ limit?: long } export interface IndicesMappingLimitSettingsDimensionFields { + /** [preview] This functionality is in technical preview and may be changed or removed in a future release. + * Elastic will work to fix any issues, but features in technical preview are not subject to the support SLA of official GA features. */ limit?: long } export interface IndicesMappingLimitSettingsFieldNameLength { + /** Setting for the maximum length of a field name. This setting isn’t really something that addresses mappings explosion but + * might still be useful if you want to limit the field length. It usually shouldn’t be necessary to set this setting. The + * default is okay unless a user starts to add a huge number of fields with really long names. Default is `Long.MAX_VALUE` (no limit). */ limit?: long } export interface IndicesMappingLimitSettingsNestedFields { + /** The maximum number of distinct nested mappings in an index. The nested type should only be used in special cases, when + * arrays of objects need to be queried independently of each other. To safeguard against poorly designed mappings, this + * setting limits the number of unique nested types per index. */ limit?: long } export interface IndicesMappingLimitSettingsNestedObjects { + /** The maximum number of nested JSON objects that a single document can contain across all nested types. This limit helps + * to prevent out of memory errors when a document contains too many nested objects. */ limit?: long } @@ -12971,7 +18632,15 @@ export interface IndicesMappingLimitSettingsSourceFields { } export interface IndicesMappingLimitSettingsTotalFields { + /** The maximum number of fields in an index. Field and object mappings, as well as field aliases count towards this limit. + * The limit is in place to prevent mappings and searches from becoming too large. Higher values can lead to performance + * degradations and memory issues, especially in clusters with a high load or few resources. */ limit?: long | string + /** This setting determines what happens when a dynamically mapped field would exceed the total fields limit. When set + * to false (the default), the index request of the document that tries to add a dynamic field to the mapping will fail + * with the message Limit of total fields [X] has been exceeded. When set to true, the index request will not fail. + * Instead, fields that would exceed the limit are not added to the mapping, similar to dynamic: false. + * The fields that were not added to the mapping will be added to the _ignored field. 
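// Editor's illustrative sketch (not part of the generated diff): adjusting the mapping limit
// settings documented above on an existing index. The index name and limit are hypothetical,
// and this assumes both settings can be updated dynamically on the target version.
await client.indices.putSettings({
  index: 'logs-000001',
  settings: {
    mapping: {
      total_fields: {
        limit: 2000,
        ignore_dynamic_beyond_limit: true   // drop extra dynamic fields instead of failing
      }
    }
  }
})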
*/ ignore_dynamic_beyond_limit?: boolean | string } @@ -13069,8 +18738,8 @@ export interface IndicesSettingsSimilarityLmj { export interface IndicesSettingsSimilarityScripted { type: 'scripted' - script: Script | string - weight_script?: Script | string + script: Script | ScriptSource + weight_script?: Script | ScriptSource } export interface IndicesSlowlogSettings { @@ -13093,14 +18762,23 @@ export interface IndicesSlowlogTresholds { } export interface IndicesSoftDeletes { + /** Indicates whether soft deletes are enabled on the index. */ enabled?: boolean + /** The maximum period to retain a shard history retention lease before it is considered expired. + * Shard history retention leases ensure that soft deletes are retained during merges on the Lucene + * index. If a soft delete is merged away before it can be replicated to a follower the following + * process will fail due to incomplete history on the leader. */ retention_lease?: IndicesRetentionLease } -export type IndicesSourceMode = 'DISABLED' | 'STORED' | 'SYNTHETIC' +export type IndicesSourceMode = 'disabled' | 'stored' | 'synthetic' export interface IndicesStorage { type: IndicesStorageType + /** You can restrict the use of the mmapfs and the related hybridfs store type via the setting node.store.allow_mmap. + * This is a boolean setting indicating whether or not memory-mapping is allowed. The default is to allow it. This + * setting is useful, for example, if you are in an environment where you can not control the ability to create a lot + * of memory maps so you need disable the ability to use memory-mapping. */ allow_mmap?: boolean } @@ -13116,8 +18794,16 @@ export interface IndicesTemplateMapping { } export interface IndicesTranslog { + /** How often the translog is fsynced to disk and committed, regardless of write operations. + * Values less than 100ms are not allowed. */ sync_interval?: Duration + /** Whether or not to `fsync` and commit the translog after every index, delete, update, or bulk request. */ durability?: IndicesTranslogDurability + /** The translog stores all operations that are not yet safely persisted in Lucene (i.e., are not + * part of a Lucene commit point). Although these operations are available for reads, they will need + * to be replayed if the shard was stopped and had to be recovered. This setting controls the + * maximum total size of these operations, to prevent recoveries from taking too long. Once the + * maximum size has been reached a flush will happen, generating a new Lucene commit point. */ flush_threshold_size?: ByteSize retention?: IndicesTranslogRetention } @@ -13125,7 +18811,17 @@ export interface IndicesTranslog { export type IndicesTranslogDurability = 'request' | 'REQUEST' | 'async' | 'ASYNC' export interface IndicesTranslogRetention { + /** This controls the total size of translog files to keep for each shard. Keeping more translog files increases + * the chance of performing an operation based sync when recovering a replica. If the translog files are not + * sufficient, replica recovery will fall back to a file based sync. This setting is ignored, and should not be + * set, if soft deletes are enabled. Soft deletes are enabled by default in indices created in Elasticsearch + * versions 7.0.0 and later. */ size?: ByteSize + /** This controls the maximum duration for which translog files are kept by each shard. Keeping more + * translog files increases the chance of performing an operation based sync when recovering replicas. 
If + * the translog files are not sufficient, replica recovery will fall back to a file based sync. This setting + * is ignored, and should not be set, if soft deletes are enabled. Soft deletes are enabled by default in + * indices created in Elasticsearch versions 7.0.0 and later. */ age?: Duration } @@ -13137,19 +18833,30 @@ export interface IndicesAddBlockIndicesBlockStatus { } export interface IndicesAddBlockRequest extends RequestBase { -/** A comma-separated list or wildcard expression of index names used to limit the request. By default, you must explicitly name the indices you are adding blocks to. To allow the adding of blocks to indices with `_all`, `*`, or other wildcard expressions, change the `action.destructive_requires_name` setting to `false`. You can update this setting in the `elasticsearch.yml` file or by using the cluster update settings API. */ + /** A comma-separated list or wildcard expression of index names used to limit the request. + * By default, you must explicitly name the indices you are adding blocks to. + * To allow the adding of blocks to indices with `_all`, `*`, or other wildcard expressions, change the `action.destructive_requires_name` setting to `false`. + * You can update this setting in the `elasticsearch.yml` file or by using the cluster update settings API. */ index: IndexName /** The block type to add to the index. */ block: IndicesAddBlockIndicesBlockOptions - /** If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. */ + /** If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. + * This behavior applies even if the request targets other open indices. + * For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. */ allow_no_indices?: boolean - /** The type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. It supports comma-separated values, such as `open,hidden`. */ + /** The type of index that wildcard patterns can match. + * If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. + * It supports comma-separated values, such as `open,hidden`. */ expand_wildcards?: ExpandWildcards /** If `false`, the request returns an error if it targets a missing or closed index. */ ignore_unavailable?: boolean - /** The period to wait for the master node. If the master node is not available before the timeout expires, the request fails and returns an error. It can also be set to `-1` to indicate that the request should never timeout. */ + /** The period to wait for the master node. + * If the master node is not available before the timeout expires, the request fails and returns an error. + * It can also be set to `-1` to indicate that the request should never timeout. */ master_timeout?: Duration - /** The period to wait for a response from all relevant nodes in the cluster after updating the cluster metadata. 
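// Editor's illustrative sketch (not part of the generated diff): adding an index block with the
// IndicesAddBlockRequest shape above. The index name is hypothetical.
const blockResp = await client.indices.addBlock({
  index: 'logs-000001',
  block: 'write'            // make the index read-only, e.g. before a clone or shrink
})
console.log(blockResp.acknowledged, blockResp.indices)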
If no response is received before the timeout expires, the cluster metadata update still applies but the response will indicate that it was not completely acknowledged. It can also be set to `-1` to indicate that the request should never timeout. */ + /** The period to wait for a response from all relevant nodes in the cluster after updating the cluster metadata. + * If no response is received before the timeout expires, the cluster metadata update still applies but the response will indicate that it was not completely acknowledged. + * It can also be set to `-1` to indicate that the request should never timeout. */ timeout?: Duration /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { index?: never, block?: never, allow_no_indices?: never, expand_wildcards?: never, ignore_unavailable?: never, master_timeout?: never, timeout?: never } @@ -13205,9 +18912,12 @@ export type IndicesAnalyzeExplainAnalyzeToken = IndicesAnalyzeExplainAnalyzeToke & { [property: string]: any } export interface IndicesAnalyzeRequest extends RequestBase { -/** Index used to derive the analyzer. If specified, the `analyzer` or field parameter overrides this value. If no index is specified or the index does not have a default analyzer, the analyze API uses the standard analyzer. */ + /** Index used to derive the analyzer. + * If specified, the `analyzer` or field parameter overrides this value. + * If no index is specified or the index does not have a default analyzer, the analyze API uses the standard analyzer. */ index?: IndexName - /** The name of the analyzer that should be applied to the provided `text`. This could be a built-in analyzer, or an analyzer that’s been configured in the index. */ + /** The name of the analyzer that should be applied to the provided `text`. + * This could be a built-in analyzer, or an analyzer that’s been configured in the index. */ analyzer?: string /** Array of token attributes used to filter the output of the `explain` parameter. */ attributes?: string[] @@ -13215,13 +18925,16 @@ export interface IndicesAnalyzeRequest extends RequestBase { char_filter?: AnalysisCharFilter[] /** If `true`, the response includes token attributes and additional details. */ explain?: boolean - /** Field used to derive the analyzer. To use this parameter, you must specify an index. If specified, the `analyzer` parameter overrides this value. */ + /** Field used to derive the analyzer. + * To use this parameter, you must specify an index. + * If specified, the `analyzer` parameter overrides this value. */ field?: Field /** Array of token filters used to apply after the tokenizer. */ filter?: AnalysisTokenFilter[] /** Normalizer to use to convert text into a single token. */ normalizer?: string - /** Text to analyze. If an array of strings is provided, it is analyzed as a multi-value field. */ + /** Text to analyze. + * If an array of strings is provided, it is analyzed as a multi-value field. */ text?: IndicesAnalyzeTextToAnalyze /** Tokenizer to use to convert text into tokens. */ tokenizer?: AnalysisTokenizer @@ -13244,7 +18957,7 @@ export interface IndicesAnalyzeTokenDetail { } export interface IndicesCancelMigrateReindexRequest extends RequestBase { -/** The index or data stream name */ + /** The index or data stream name */ index: Indices /** All values in `body` will be added to the request body. 
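// Editor's illustrative sketch (not part of the generated diff): the IndicesAnalyzeRequest
// shape above, run against a built-in analyzer. The text value is arbitrary.
const tokens = await client.indices.analyze({
  analyzer: 'standard',
  text: 'The quick brown fox',
  explain: false
})
console.log(tokens.tokens?.map(t => t.token))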
*/ body?: string | { [key: string]: any } & { index?: never } @@ -13255,13 +18968,20 @@ export interface IndicesCancelMigrateReindexRequest extends RequestBase { export type IndicesCancelMigrateReindexResponse = AcknowledgedResponseBase export interface IndicesClearCacheRequest extends RequestBase { -/** Comma-separated list of data streams, indices, and aliases used to limit the request. Supports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`. */ + /** Comma-separated list of data streams, indices, and aliases used to limit the request. + * Supports wildcards (`*`). + * To target all data streams and indices, omit this parameter or use `*` or `_all`. */ index?: Indices - /** If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. */ + /** If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. + * This behavior applies even if the request targets other open indices. */ allow_no_indices?: boolean - /** Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. */ + /** Type of index that wildcard patterns can match. + * If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. + * Supports comma-separated values, such as `open,hidden`. + * Valid values are: `all`, `open`, `closed`, `hidden`, `none`. */ expand_wildcards?: ExpandWildcards - /** If `true`, clears the fields cache. Use the `fields` parameter to clear the cache of specific fields only. */ + /** If `true`, clears the fields cache. + * Use the `fields` parameter to clear the cache of specific fields only. */ fielddata?: boolean /** Comma-separated list of field names used to limit the `fielddata` parameter. */ fields?: Fields @@ -13280,15 +19000,18 @@ export interface IndicesClearCacheRequest extends RequestBase { export type IndicesClearCacheResponse = ShardsOperationResponseBase export interface IndicesCloneRequest extends RequestBase { -/** Name of the source index to clone. */ + /** Name of the source index to clone. */ index: IndexName /** Name of the target index to create. */ target: Name - /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ + /** Period to wait for a connection to the master node. + * If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration - /** Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ + /** Period to wait for a response. + * If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration - /** The number of shard copies that must be active before proceeding with the operation. Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). */ + /** The number of shard copies that must be active before proceeding with the operation. 
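// Editor's illustrative sketch (not part of the generated diff): the IndicesClearCacheRequest
// shape above, clearing only the fielddata cache for one field. Names are hypothetical.
await client.indices.clearCache({
  index: 'logs-000001',
  fielddata: true,
  fields: 'user.id'         // limit the fielddata clear to this field
})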
+ * Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). */ wait_for_active_shards?: WaitForActiveShards /** Aliases for the resulting index. */ aliases?: Record @@ -13316,19 +19039,26 @@ export interface IndicesCloseCloseShardResult { } export interface IndicesCloseRequest extends RequestBase { -/** Comma-separated list or wildcard expression of index names used to limit the request. */ + /** Comma-separated list or wildcard expression of index names used to limit the request. */ index: Indices - /** If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. */ + /** If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. + * This behavior applies even if the request targets other open indices. */ allow_no_indices?: boolean - /** Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. */ + /** Type of index that wildcard patterns can match. + * If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. + * Supports comma-separated values, such as `open,hidden`. + * Valid values are: `all`, `open`, `closed`, `hidden`, `none`. */ expand_wildcards?: ExpandWildcards /** If `false`, the request returns an error if it targets a missing or closed index. */ ignore_unavailable?: boolean - /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ + /** Period to wait for a connection to the master node. + * If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration - /** Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ + /** Period to wait for a response. + * If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration - /** The number of shard copies that must be active before proceeding with the operation. Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). */ + /** The number of shard copies that must be active before proceeding with the operation. + * Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). */ wait_for_active_shards?: WaitForActiveShards /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { index?: never, allow_no_indices?: never, expand_wildcards?: never, ignore_unavailable?: never, master_timeout?: never, timeout?: never, wait_for_active_shards?: never } @@ -13343,17 +19073,23 @@ export interface IndicesCloseResponse { } export interface IndicesCreateRequest extends RequestBase { -/** Name of the index you wish to create. */ + /** Name of the index you wish to create. */ index: IndexName - /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. 
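// Editor's illustrative sketch (not part of the generated diff): the IndicesCloneRequest shape
// above. Cloning requires the source index to be read-only first (see the add-block example
// earlier); index names are hypothetical.
await client.indices.addBlock({ index: 'logs-000001', block: 'write' })
await client.indices.clone({
  index: 'logs-000001',
  target: 'logs-000001-clone',
  wait_for_active_shards: 'all'
})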
*/ + /** Period to wait for a connection to the master node. + * If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration - /** Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ + /** Period to wait for a response. + * If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration - /** The number of shard copies that must be active before proceeding with the operation. Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). */ + /** The number of shard copies that must be active before proceeding with the operation. + * Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). */ wait_for_active_shards?: WaitForActiveShards /** Aliases for the index. */ aliases?: Record - /** Mapping for fields in the index. If specified, this mapping can include: - Field names - Field data types - Mapping parameters */ + /** Mapping for fields in the index. If specified, this mapping can include: + * - Field names + * - Field data types + * - Mapping parameters */ mappings?: MappingTypeMapping /** Configuration options for the index. */ settings?: IndicesIndexSettings @@ -13370,7 +19106,12 @@ export interface IndicesCreateResponse { } export interface IndicesCreateDataStreamRequest extends RequestBase { -/** Name of the data stream, which must meet the following criteria: Lowercase only; Cannot include `\`, `/`, `*`, `?`, `"`, `<`, `>`, `|`, `,`, `#`, `:`, or a space character; Cannot start with `-`, `_`, `+`, or `.ds-`; Cannot be `.` or `..`; Cannot be longer than 255 bytes. Multi-byte characters count towards this limit faster. */ + /** Name of the data stream, which must meet the following criteria: + * Lowercase only; + * Cannot include `\`, `/`, `*`, `?`, `"`, `<`, `>`, `|`, `,`, `#`, `:`, or a space character; + * Cannot start with `-`, `_`, `+`, or `.ds-`; + * Cannot be `.` or `..`; + * Cannot be longer than 255 bytes. Multi-byte characters count towards this limit faster. */ name: DataStreamName /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration @@ -13385,13 +19126,16 @@ export interface IndicesCreateDataStreamRequest extends RequestBase { export type IndicesCreateDataStreamResponse = AcknowledgedResponseBase export interface IndicesCreateFromCreateFrom { + /** Mappings overrides to be applied to the destination index (optional) */ mappings_override?: MappingTypeMapping + /** Settings overrides to be applied to the destination index (optional) */ settings_override?: IndicesIndexSettings + /** If index blocks should be removed when creating destination index (optional) */ remove_index_blocks?: boolean } export interface IndicesCreateFromRequest extends RequestBase { -/** The source index or data stream name */ + /** The source index or data stream name */ source: IndexName /** The destination index or data stream name */ dest: IndexName @@ -13409,17 +19153,30 @@ export interface IndicesCreateFromResponse { } export interface IndicesDataStreamsStatsDataStreamsStatsItem { + /** Current number of backing indices for the data stream. */ backing_indices: integer + /** Name of the data stream. 
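// Editor's illustrative sketch (not part of the generated diff): creating a data stream that
// follows the IndicesCreateDataStreamRequest naming rules above. The name is hypothetical and
// a matching index template with a `data_stream` object must already exist (see the index
// template example earlier).
await client.indices.createDataStream({ name: 'logs-myapp-default' })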
*/ data_stream: Name + /** The data stream’s highest `@timestamp` value, converted to milliseconds since the Unix epoch. + * NOTE: This timestamp is provided as a best effort. + * The data stream may contain `@timestamp` values higher than this if one or more of the following conditions are met: + * The stream contains closed backing indices; + * Backing indices with a lower generation contain higher `@timestamp` values. */ maximum_timestamp: EpochTime + /** Total size of all shards for the data stream’s backing indices. + * This parameter is only returned if the `human` query parameter is `true`. */ store_size?: ByteSize + /** Total size, in bytes, of all shards for the data stream’s backing indices. */ store_size_bytes: long } export interface IndicesDataStreamsStatsRequest extends RequestBase { -/** Comma-separated list of data streams used to limit the request. Wildcard expressions (`*`) are supported. To target all data streams in a cluster, omit this parameter or use `*`. */ + /** Comma-separated list of data streams used to limit the request. + * Wildcard expressions (`*`) are supported. + * To target all data streams in a cluster, omit this parameter or use `*`. */ name?: IndexName - /** Type of data stream that wildcard patterns can match. Supports comma-separated values, such as `open,hidden`. */ + /** Type of data stream that wildcard patterns can match. + * Supports comma-separated values, such as `open,hidden`. */ expand_wildcards?: ExpandWildcards /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { name?: never, expand_wildcards?: never } @@ -13428,26 +19185,42 @@ export interface IndicesDataStreamsStatsRequest extends RequestBase { } export interface IndicesDataStreamsStatsResponse { + /** Contains information about shards that attempted to execute the request. */ _shards: ShardStatistics + /** Total number of backing indices for the selected data streams. */ backing_indices: integer + /** Total number of selected data streams. */ data_stream_count: integer + /** Contains statistics for the selected data streams. */ data_streams: IndicesDataStreamsStatsDataStreamsStatsItem[] + /** Total size of all shards for the selected data streams. + * This property is included only if the `human` query parameter is `true` */ total_store_sizes?: ByteSize + /** Total size, in bytes, of all shards for the selected data streams. */ total_store_size_bytes: long } export interface IndicesDeleteRequest extends RequestBase { -/** Comma-separated list of indices to delete. You cannot specify index aliases. By default, this parameter does not support wildcards (`*`) or `_all`. To use wildcards or `_all`, set the `action.destructive_requires_name` cluster setting to `false`. */ + /** Comma-separated list of indices to delete. + * You cannot specify index aliases. + * By default, this parameter does not support wildcards (`*`) or `_all`. + * To use wildcards or `_all`, set the `action.destructive_requires_name` cluster setting to `false`. */ index: Indices - /** If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. */ + /** If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. + * This behavior applies even if the request targets other open indices. 
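// Editor's illustrative sketch (not part of the generated diff): reading the statistics
// described by IndicesDataStreamsStatsResponse above. The stream pattern is hypothetical.
const stats = await client.indices.dataStreamsStats({ name: 'logs-*' })
for (const ds of stats.data_streams) {
  console.log(ds.data_stream, ds.backing_indices, ds.store_size_bytes)
}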
*/ allow_no_indices?: boolean - /** Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. */ + /** Type of index that wildcard patterns can match. + * If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. + * Supports comma-separated values, such as `open,hidden`. + * Valid values are: `all`, `open`, `closed`, `hidden`, `none`. */ expand_wildcards?: ExpandWildcards /** If `false`, the request returns an error if it targets a missing or closed index. */ ignore_unavailable?: boolean - /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ + /** Period to wait for a connection to the master node. + * If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration - /** Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ + /** Period to wait for a response. + * If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { index?: never, allow_no_indices?: never, expand_wildcards?: never, ignore_unavailable?: never, master_timeout?: never, timeout?: never } @@ -13458,13 +19231,17 @@ export interface IndicesDeleteRequest extends RequestBase { export type IndicesDeleteResponse = IndicesResponseBase export interface IndicesDeleteAliasRequest extends RequestBase { -/** Comma-separated list of data streams or indices used to limit the request. Supports wildcards (`*`). */ + /** Comma-separated list of data streams or indices used to limit the request. + * Supports wildcards (`*`). */ index: Indices - /** Comma-separated list of aliases to remove. Supports wildcards (`*`). To remove all aliases, use `*` or `_all`. */ + /** Comma-separated list of aliases to remove. + * Supports wildcards (`*`). To remove all aliases, use `*` or `_all`. */ name: Names - /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ + /** Period to wait for a connection to the master node. + * If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration - /** Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ + /** Period to wait for a response. + * If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration /** All values in `body` will be added to the request body. 
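For the delete request above, a minimal sketch (hypothetical index name, `client` as before). Note that aliases cannot be used here, and wildcards or `_all` require `action.destructive_requires_name` to be set to `false`.

await client.indices.delete({
  index: 'my-old-index',     // concrete index names only, not aliases
  ignore_unavailable: true,  // do not fail if the index is already gone
  master_timeout: '30s',
  timeout: '30s'
})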
*/ body?: string | { [key: string]: any } & { index?: never, name?: never, master_timeout?: never, timeout?: never } @@ -13475,7 +19252,7 @@ export interface IndicesDeleteAliasRequest extends RequestBase { export type IndicesDeleteAliasResponse = AcknowledgedResponseBase export interface IndicesDeleteDataLifecycleRequest extends RequestBase { -/** A comma-separated list of data streams of which the data stream lifecycle will be deleted; use `*` to get all data streams */ + /** A comma-separated list of data streams of which the data stream lifecycle will be deleted; use `*` to get all data streams */ name: DataStreamNames /** Whether wildcard expressions should get expanded to open or closed indices (default: open) */ expand_wildcards?: ExpandWildcards @@ -13492,7 +19269,7 @@ export interface IndicesDeleteDataLifecycleRequest extends RequestBase { export type IndicesDeleteDataLifecycleResponse = AcknowledgedResponseBase export interface IndicesDeleteDataStreamRequest extends RequestBase { -/** Comma-separated list of data streams to delete. Wildcard (`*`) expressions are supported. */ + /** Comma-separated list of data streams to delete. Wildcard (`*`) expressions are supported. */ name: DataStreamNames /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration @@ -13507,7 +19284,7 @@ export interface IndicesDeleteDataStreamRequest extends RequestBase { export type IndicesDeleteDataStreamResponse = AcknowledgedResponseBase export interface IndicesDeleteIndexTemplateRequest extends RequestBase { -/** Comma-separated list of index template names used to limit the request. Wildcard (*) expressions are supported. */ + /** Comma-separated list of index template names used to limit the request. Wildcard (*) expressions are supported. */ name: Names /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration @@ -13522,11 +19299,14 @@ export interface IndicesDeleteIndexTemplateRequest extends RequestBase { export type IndicesDeleteIndexTemplateResponse = AcknowledgedResponseBase export interface IndicesDeleteTemplateRequest extends RequestBase { -/** The name of the legacy index template to delete. Wildcard (`*`) expressions are supported. */ + /** The name of the legacy index template to delete. + * Wildcard (`*`) expressions are supported. */ name: Name - /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ + /** Period to wait for a connection to the master node. + * If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration - /** Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ + /** Period to wait for a response. + * If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration /** All values in `body` will be added to the request body. 
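The various delete APIs above all follow the same shape; a sketch with hypothetical names:

// Remove an alias without touching the underlying indices.
await client.indices.deleteAlias({ index: 'my-index-*', name: 'stale-alias' })

// Delete a data stream together with its backing indices.
await client.indices.deleteDataStream({ name: 'logs-myapp-default' })

// Delete a composable index template (legacy templates use deleteTemplate instead).
await client.indices.deleteIndexTemplate({ name: 'logs-template' })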
*/ body?: string | { [key: string]: any } & { name?: never, master_timeout?: never, timeout?: never } @@ -13537,17 +19317,24 @@ export interface IndicesDeleteTemplateRequest extends RequestBase { export type IndicesDeleteTemplateResponse = AcknowledgedResponseBase export interface IndicesDiskUsageRequest extends RequestBase { -/** Comma-separated list of data streams, indices, and aliases used to limit the request. It’s recommended to execute this API with a single index (or the latest backing index of a data stream) as the API consumes resources significantly. */ + /** Comma-separated list of data streams, indices, and aliases used to limit the request. + * It’s recommended to execute this API with a single index (or the latest backing index of a data stream) as the API consumes resources significantly. */ index: Indices - /** If false, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. */ + /** If false, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. + * This behavior applies even if the request targets other open indices. + * For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. */ allow_no_indices?: boolean - /** Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such as `open,hidden`. */ + /** Type of index that wildcard patterns can match. + * If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. + * Supports comma-separated values, such as `open,hidden`. */ expand_wildcards?: ExpandWildcards - /** If `true`, the API performs a flush before analysis. If `false`, the response may not include uncommitted data. */ + /** If `true`, the API performs a flush before analysis. + * If `false`, the response may not include uncommitted data. */ flush?: boolean /** If `true`, missing or closed indices are not included in the response. */ ignore_unavailable?: boolean - /** Analyzing field disk usage is resource-intensive. To use the API, this parameter must be set to `true`. */ + /** Analyzing field disk usage is resource-intensive. + * To use the API, this parameter must be set to `true`. */ run_expensive_tasks?: boolean /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { index?: never, allow_no_indices?: never, expand_wildcards?: never, flush?: never, ignore_unavailable?: never, run_expensive_tasks?: never } @@ -13558,7 +19345,7 @@ export interface IndicesDiskUsageRequest extends RequestBase { export type IndicesDiskUsageResponse = any export interface IndicesDownsampleRequest extends RequestBase { -/** Name of the time series index to downsample. */ + /** Name of the time series index to downsample. */ index: IndexName /** Name of the index to create. 
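A sketch for the disk usage analysis above; the API refuses to run unless `run_expensive_tasks` is explicitly `true`, and the response is untyped (`any`), so it is inspected generically here. Names are hypothetical.

const usage = await client.indices.diskUsage({
  index: 'my-index-000001',  // prefer a single index or the latest backing index of a data stream
  run_expensive_tasks: true,
  flush: true
})
console.log(JSON.stringify(usage, null, 2))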
*/ target_index: IndexName @@ -13572,11 +19359,15 @@ export interface IndicesDownsampleRequest extends RequestBase { export type IndicesDownsampleResponse = any export interface IndicesExistsRequest extends RequestBase { -/** Comma-separated list of data streams, indices, and aliases. Supports wildcards (`*`). */ + /** Comma-separated list of data streams, indices, and aliases. Supports wildcards (`*`). */ index: Indices - /** If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. */ + /** If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. + * This behavior applies even if the request targets other open indices. */ allow_no_indices?: boolean - /** Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. */ + /** Type of index that wildcard patterns can match. + * If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. + * Supports comma-separated values, such as `open,hidden`. + * Valid values are: `all`, `open`, `closed`, `hidden`, `none`. */ expand_wildcards?: ExpandWildcards /** If `true`, returns settings in flat format. */ flat_settings?: boolean @@ -13595,17 +19386,23 @@ export interface IndicesExistsRequest extends RequestBase { export type IndicesExistsResponse = boolean export interface IndicesExistsAliasRequest extends RequestBase { -/** Comma-separated list of aliases to check. Supports wildcards (`*`). */ + /** Comma-separated list of aliases to check. Supports wildcards (`*`). */ name: Names - /** Comma-separated list of data streams or indices used to limit the request. Supports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`. */ + /** Comma-separated list of data streams or indices used to limit the request. Supports wildcards (`*`). + * To target all data streams and indices, omit this parameter or use `*` or `_all`. */ index?: Indices - /** If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. */ + /** If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. + * This behavior applies even if the request targets other open indices. */ allow_no_indices?: boolean - /** Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. */ + /** Type of index that wildcard patterns can match. + * If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. + * Supports comma-separated values, such as `open,hidden`. + * Valid values are: `all`, `open`, `closed`, `hidden`, `none`. 
*/ expand_wildcards?: ExpandWildcards /** If `false`, requests that include a missing data stream or index in the target indices or data streams return an error. */ ignore_unavailable?: boolean - /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ + /** Period to wait for a connection to the master node. + * If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { name?: never, index?: never, allow_no_indices?: never, expand_wildcards?: never, ignore_unavailable?: never, master_timeout?: never } @@ -13616,26 +19413,33 @@ export interface IndicesExistsAliasRequest extends RequestBase { export type IndicesExistsAliasResponse = boolean export interface IndicesExistsIndexTemplateRequest extends RequestBase { -/** Comma-separated list of index template names used to limit the request. Wildcard (*) expressions are supported. */ + /** Comma-separated list of index template names used to limit the request. Wildcard (*) expressions are supported. */ name: Name + /** If true, the request retrieves information from the local node only. Defaults to false, which means information is retrieved from the master node. */ + local?: boolean + /** If true, returns settings in flat format. */ + flat_settings?: boolean /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration /** All values in `body` will be added to the request body. */ - body?: string | { [key: string]: any } & { name?: never, master_timeout?: never } + body?: string | { [key: string]: any } & { name?: never, local?: never, flat_settings?: never, master_timeout?: never } /** All values in `querystring` will be added to the request querystring. */ - querystring?: { [key: string]: any } & { name?: never, master_timeout?: never } + querystring?: { [key: string]: any } & { name?: never, local?: never, flat_settings?: never, master_timeout?: never } } export type IndicesExistsIndexTemplateResponse = boolean export interface IndicesExistsTemplateRequest extends RequestBase { -/** A comma-separated list of index template names used to limit the request. Wildcard (`*`) expressions are supported. */ + /** A comma-separated list of index template names used to limit the request. + * Wildcard (`*`) expressions are supported. */ name: Names /** Indicates whether to use a flat format for the response. */ flat_settings?: boolean /** Indicates whether to get information from the local node only. */ local?: boolean - /** The period to wait for the master node. If the master node is not available before the timeout expires, the request fails and returns an error. To indicate that the request should never timeout, set it to `-1`. */ + /** The period to wait for the master node. + * If the master node is not available before the timeout expires, the request fails and returns an error. + * To indicate that the request should never timeout, set it to `-1`. */ master_timeout?: Duration /** All values in `body` will be added to the request body. 
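The exists-style endpoints above resolve to a plain boolean in the client, so they can be used directly in conditionals (hypothetical names, `client` as before):

const hasIndex = await client.indices.exists({ index: 'my-index-000001' })
const hasAlias = await client.indices.existsAlias({ name: 'my-alias', index: 'my-index-*' })
const hasTemplate = await client.indices.existsIndexTemplate({ name: 'logs-template' })
if (!hasIndex && !hasAlias && hasTemplate) {
  // e.g. safe to bootstrap a new index from the template
}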
*/ body?: string | { [key: string]: any } & { name?: never, flat_settings?: never, local?: never, master_timeout?: never } @@ -13658,7 +19462,7 @@ export interface IndicesExplainDataLifecycleDataStreamLifecycleExplain { } export interface IndicesExplainDataLifecycleRequest extends RequestBase { -/** The name of the index to explain */ + /** The name of the index to explain */ index: Indices /** indicates if the API should return the default values the system uses for the index's lifecycle */ include_defaults?: boolean @@ -13702,22 +19506,24 @@ export interface IndicesFieldUsageStatsInvertedIndex { } export interface IndicesFieldUsageStatsRequest extends RequestBase { -/** Comma-separated list or wildcard expression of index names used to limit the request. */ + /** Comma-separated list or wildcard expression of index names used to limit the request. */ index: Indices - /** If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. */ + /** If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. + * This behavior applies even if the request targets other open indices. + * For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. */ allow_no_indices?: boolean - /** Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such as `open,hidden`. */ + /** Type of index that wildcard patterns can match. + * If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. + * Supports comma-separated values, such as `open,hidden`. */ expand_wildcards?: ExpandWildcards /** If `true`, missing or closed indices are not included in the response. */ ignore_unavailable?: boolean /** Comma-separated list or wildcard expressions of fields to include in the statistics. */ fields?: Fields - /** The number of shard copies that must be active before proceeding with the operation. Set to all or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). */ - wait_for_active_shards?: WaitForActiveShards /** All values in `body` will be added to the request body. */ - body?: string | { [key: string]: any } & { index?: never, allow_no_indices?: never, expand_wildcards?: never, ignore_unavailable?: never, fields?: never, wait_for_active_shards?: never } + body?: string | { [key: string]: any } & { index?: never, allow_no_indices?: never, expand_wildcards?: never, ignore_unavailable?: never, fields?: never } /** All values in `querystring` will be added to the request querystring. 
*/ - querystring?: { [key: string]: any } & { index?: never, allow_no_indices?: never, expand_wildcards?: never, ignore_unavailable?: never, fields?: never, wait_for_active_shards?: never } + querystring?: { [key: string]: any } & { index?: never, allow_no_indices?: never, expand_wildcards?: never, ignore_unavailable?: never, fields?: never } } export type IndicesFieldUsageStatsResponse = IndicesFieldUsageStatsFieldsUsageBody @@ -13739,17 +19545,24 @@ export interface IndicesFieldUsageStatsUsageStatsShards { } export interface IndicesFlushRequest extends RequestBase { -/** Comma-separated list of data streams, indices, and aliases to flush. Supports wildcards (`*`). To flush all data streams and indices, omit this parameter or use `*` or `_all`. */ + /** Comma-separated list of data streams, indices, and aliases to flush. + * Supports wildcards (`*`). + * To flush all data streams and indices, omit this parameter or use `*` or `_all`. */ index?: Indices - /** If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. */ + /** If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. + * This behavior applies even if the request targets other open indices. */ allow_no_indices?: boolean - /** Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. */ + /** Type of index that wildcard patterns can match. + * If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. + * Supports comma-separated values, such as `open,hidden`. + * Valid values are: `all`, `open`, `closed`, `hidden`, `none`. */ expand_wildcards?: ExpandWildcards /** If `true`, the request forces a flush even if there are no changes to commit to the index. */ force?: boolean /** If `false`, the request returns an error if it targets a missing or closed index. */ ignore_unavailable?: boolean - /** If `true`, the flush operation blocks until execution when another flush operation is running. If `false`, Elasticsearch returns an error if you request a flush when another flush operation is running. */ + /** If `true`, the flush operation blocks until execution when another flush operation is running. + * If `false`, Elasticsearch returns an error if you request a flush when another flush operation is running. */ wait_if_ongoing?: boolean /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { index?: never, allow_no_indices?: never, expand_wildcards?: never, force?: never, ignore_unavailable?: never, wait_if_ongoing?: never } @@ -13760,7 +19573,7 @@ export interface IndicesFlushRequest extends RequestBase { export type IndicesFlushResponse = ShardsOperationResponseBase export interface IndicesForcemergeRequest extends RequestBase { -/** A comma-separated list of index names; use `_all` or empty string to perform the operation on all indices */ + /** A comma-separated list of index names; use `_all` or empty string to perform the operation on all indices */ index?: Indices /** Whether to ignore if a wildcard indices expression resolves into no concrete indices. 
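A quick sketch for the field usage stats and flush requests above (hypothetical names); note that per this change the field usage stats request no longer accepts `wait_for_active_shards`.

const fieldUsage = await client.indices.fieldUsageStats({
  index: 'my-index-000001',
  fields: ['message', 'user.*']
})

await client.indices.flush({
  index: 'my-index-000001',
  wait_if_ongoing: true,  // block instead of erroring if another flush is already running
  force: false
})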
(This includes `_all` string or when no indices have been specified) */ allow_no_indices?: boolean @@ -13785,6 +19598,8 @@ export interface IndicesForcemergeRequest extends RequestBase { export type IndicesForcemergeResponse = IndicesForcemergeForceMergeResponseBody export interface IndicesForcemergeForceMergeResponseBody extends ShardsOperationResponseBase { + /** task contains a task id returned when wait_for_completion=false, + * you can use the task_id to get the status of the task at _tasks/ */ task?: string } @@ -13793,11 +19608,16 @@ export type IndicesGetFeature = 'aliases' | 'mappings' | 'settings' export type IndicesGetFeatures = IndicesGetFeature | IndicesGetFeature[] export interface IndicesGetRequest extends RequestBase { -/** Comma-separated list of data streams, indices, and index aliases used to limit the request. Wildcard expressions (*) are supported. */ + /** Comma-separated list of data streams, indices, and index aliases used to limit the request. + * Wildcard expressions (*) are supported. */ index: Indices - /** If false, the request returns an error if any wildcard expression, index alias, or _all value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting foo*,bar* returns an error if an index starts with foo but no index starts with bar. */ + /** If false, the request returns an error if any wildcard expression, index alias, or _all value targets only + * missing or closed indices. This behavior applies even if the request targets other open indices. For example, + * a request targeting foo*,bar* returns an error if an index starts with foo but no index starts with bar. */ allow_no_indices?: boolean - /** Type of index that wildcard expressions can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such as open,hidden. */ + /** Type of index that wildcard expressions can match. If the request can target data streams, this argument + * determines whether wildcard expressions match hidden data streams. Supports comma-separated values, + * such as open,hidden. */ expand_wildcards?: ExpandWildcards /** If true, returns settings in flat format. */ flat_settings?: boolean @@ -13824,17 +19644,26 @@ export interface IndicesGetAliasIndexAliases { } export interface IndicesGetAliasRequest extends RequestBase { -/** Comma-separated list of aliases to retrieve. Supports wildcards (`*`). To retrieve all aliases, omit this parameter or use `*` or `_all`. */ + /** Comma-separated list of aliases to retrieve. + * Supports wildcards (`*`). + * To retrieve all aliases, omit this parameter or use `*` or `_all`. */ name?: Names - /** Comma-separated list of data streams or indices used to limit the request. Supports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`. */ + /** Comma-separated list of data streams or indices used to limit the request. + * Supports wildcards (`*`). + * To target all data streams and indices, omit this parameter or use `*` or `_all`. */ index?: Indices - /** If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. */ + /** If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. 
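Since the force merge response documented above can carry a task id when `wait_for_completion` is `false`, here is a sketch of starting a merge and then polling the task; the index name and the `max_num_segments` value are illustrative assumptions.

const merge = await client.indices.forcemerge({
  index: 'my-index-000001',
  max_num_segments: 1,
  wait_for_completion: false
})
if (merge.task != null) {
  // Poll the task status via the tasks API.
  const status = await client.tasks.get({ task_id: String(merge.task) })
  console.log(status.completed)
}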
+ * This behavior applies even if the request targets other open indices. */ allow_no_indices?: boolean - /** Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. */ + /** Type of index that wildcard patterns can match. + * If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. + * Supports comma-separated values, such as `open,hidden`. + * Valid values are: `all`, `open`, `closed`, `hidden`, `none`. */ expand_wildcards?: ExpandWildcards /** If `false`, the request returns an error if it targets a missing or closed index. */ ignore_unavailable?: boolean - /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ + /** Period to wait for a connection to the master node. + * If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { name?: never, index?: never, allow_no_indices?: never, expand_wildcards?: never, ignore_unavailable?: never, master_timeout?: never } @@ -13850,9 +19679,13 @@ export interface IndicesGetDataLifecycleDataStreamWithLifecycle { } export interface IndicesGetDataLifecycleRequest extends RequestBase { -/** Comma-separated list of data streams to limit the request. Supports wildcards (`*`). To target all data streams, omit this parameter or use `*` or `_all`. */ + /** Comma-separated list of data streams to limit the request. + * Supports wildcards (`*`). + * To target all data streams, omit this parameter or use `*` or `_all`. */ name: DataStreamNames - /** Type of data stream that wildcard patterns can match. Supports comma-separated values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. */ + /** Type of data stream that wildcard patterns can match. + * Supports comma-separated values, such as `open,hidden`. + * Valid values are: `all`, `open`, `closed`, `hidden`, `none`. */ expand_wildcards?: ExpandWildcards /** If `true`, return all default settings in the response. */ include_defaults?: boolean @@ -13869,8 +19702,11 @@ export interface IndicesGetDataLifecycleResponse { } export interface IndicesGetDataLifecycleStatsDataStreamStats { + /** The count of the backing indices for the data stream that have encountered an error. */ backing_indices_in_error: integer + /** The count of the backing indices for the data stream. */ backing_indices_in_total: integer + /** The name of the data stream. */ name: DataStreamName } @@ -13882,16 +19718,23 @@ export interface IndicesGetDataLifecycleStatsRequest extends RequestBase { } export interface IndicesGetDataLifecycleStatsResponse { + /** The count of data streams currently being managed by the data stream lifecycle. */ data_stream_count: integer + /** Information about the data streams that are managed by the data stream lifecycle. */ data_streams: IndicesGetDataLifecycleStatsDataStreamStats[] + /** The duration of the last data stream lifecycle execution. */ last_run_duration_in_millis?: DurationValue + /** The time that passed between the start of the last two data stream lifecycle executions.
+ * This value should amount approximately to `data_streams.lifecycle.poll_interval`. */ time_between_starts_in_millis?: DurationValue } export interface IndicesGetDataStreamRequest extends RequestBase { -/** Comma-separated list of data stream names used to limit the request. Wildcard (`*`) expressions are supported. If omitted, all data streams are returned. */ + /** Comma-separated list of data stream names used to limit the request. + * Wildcard (`*`) expressions are supported. If omitted, all data streams are returned. */ name?: DataStreamNames - /** Type of data stream that wildcard patterns can match. Supports comma-separated values, such as `open,hidden`. */ + /** Type of data stream that wildcard patterns can match. + * Supports comma-separated values, such as `open,hidden`. */ expand_wildcards?: ExpandWildcards /** If true, returns all relevant default configurations for the index template. */ include_defaults?: boolean @@ -13910,22 +19753,31 @@ export interface IndicesGetDataStreamResponse { } export interface IndicesGetFieldMappingRequest extends RequestBase { -/** Comma-separated list or wildcard expression of fields used to limit returned information. Supports wildcards (`*`). */ + /** Comma-separated list or wildcard expression of fields used to limit returned information. + * Supports wildcards (`*`). */ fields: Fields - /** Comma-separated list of data streams, indices, and aliases used to limit the request. Supports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`. */ + /** Comma-separated list of data streams, indices, and aliases used to limit the request. + * Supports wildcards (`*`). + * To target all data streams and indices, omit this parameter or use `*` or `_all`. */ index?: Indices - /** If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. */ + /** If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. + * This behavior applies even if the request targets other open indices. */ allow_no_indices?: boolean - /** Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. */ + /** Type of index that wildcard patterns can match. + * If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. + * Supports comma-separated values, such as `open,hidden`. + * Valid values are: `all`, `open`, `closed`, `hidden`, `none`. */ expand_wildcards?: ExpandWildcards /** If `false`, the request returns an error if it targets a missing or closed index. */ ignore_unavailable?: boolean /** If `true`, return all default settings in the response. */ include_defaults?: boolean + /** If `true`, the request retrieves information from the local node only. */ + local?: boolean /** All values in `body` will be added to the request body. 
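A sketch of inspecting data streams and their lifecycle with the APIs above; the pattern is hypothetical and `client` is the instance from the earlier sketch.

const { data_streams } = await client.indices.getDataStream({ name: 'logs-*' })
for (const ds of data_streams) console.log(ds.name)

// Per-stream lifecycle configuration, including system defaults.
await client.indices.getDataLifecycle({ name: 'logs-*', include_defaults: true })

// Cluster-wide lifecycle execution statistics.
const lifecycleStats = await client.indices.getDataLifecycleStats()
console.log(lifecycleStats.data_stream_count, lifecycleStats.last_run_duration_in_millis)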
*/ - body?: string | { [key: string]: any } & { fields?: never, index?: never, allow_no_indices?: never, expand_wildcards?: never, ignore_unavailable?: never, include_defaults?: never } + body?: string | { [key: string]: any } & { fields?: never, index?: never, allow_no_indices?: never, expand_wildcards?: never, ignore_unavailable?: never, include_defaults?: never, local?: never } /** All values in `querystring` will be added to the request querystring. */ - querystring?: { [key: string]: any } & { fields?: never, index?: never, allow_no_indices?: never, expand_wildcards?: never, ignore_unavailable?: never, include_defaults?: never } + querystring?: { [key: string]: any } & { fields?: never, index?: never, allow_no_indices?: never, expand_wildcards?: never, ignore_unavailable?: never, include_defaults?: never, local?: never } } export type IndicesGetFieldMappingResponse = Record @@ -13940,7 +19792,7 @@ export interface IndicesGetIndexTemplateIndexTemplateItem { } export interface IndicesGetIndexTemplateRequest extends RequestBase { -/** Comma-separated list of index template names used to limit the request. Wildcard (*) expressions are supported. */ + /** Comma-separated list of index template names used to limit the request. Wildcard (*) expressions are supported. */ name?: Name /** If true, the request retrieves information from the local node only. Defaults to false, which means information is retrieved from the master node. */ local?: boolean @@ -13966,17 +19818,24 @@ export interface IndicesGetMappingIndexMappingRecord { } export interface IndicesGetMappingRequest extends RequestBase { -/** Comma-separated list of data streams, indices, and aliases used to limit the request. Supports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`. */ + /** Comma-separated list of data streams, indices, and aliases used to limit the request. + * Supports wildcards (`*`). + * To target all data streams and indices, omit this parameter or use `*` or `_all`. */ index?: Indices - /** If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. */ + /** If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. + * This behavior applies even if the request targets other open indices. */ allow_no_indices?: boolean - /** Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. */ + /** Type of index that wildcard patterns can match. + * If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. + * Supports comma-separated values, such as `open,hidden`. + * Valid values are: `all`, `open`, `closed`, `hidden`, `none`. */ expand_wildcards?: ExpandWildcards /** If `false`, the request returns an error if it targets a missing or closed index. */ ignore_unavailable?: boolean /** If `true`, the request retrieves information from the local node only. */ local?: boolean - /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. 
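For the field mapping lookup above (which per this change also gains a `local` flag), a sketch with hypothetical names:

const fieldMappings = await client.indices.getFieldMapping({
  index: 'my-index-000001',
  fields: ['message', 'user.*'],
  include_defaults: true
})
// The response is keyed by index, then by field.
console.log(fieldMappings['my-index-000001']?.mappings)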
*/ + /** Period to wait for a connection to the master node. + * If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { index?: never, allow_no_indices?: never, expand_wildcards?: never, ignore_unavailable?: never, local?: never, master_timeout?: never } @@ -13987,7 +19846,7 @@ export interface IndicesGetMappingRequest extends RequestBase { export type IndicesGetMappingResponse = Record export interface IndicesGetMigrateReindexStatusRequest extends RequestBase { -/** The index or data stream name. */ + /** The index or data stream name. */ index: Indices /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { index?: never } @@ -14020,13 +19879,21 @@ export interface IndicesGetMigrateReindexStatusStatusInProgress { } export interface IndicesGetSettingsRequest extends RequestBase { -/** Comma-separated list of data streams, indices, and aliases used to limit the request. Supports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`. */ + /** Comma-separated list of data streams, indices, and aliases used to limit + * the request. Supports wildcards (`*`). To target all data streams and + * indices, omit this parameter or use `*` or `_all`. */ index?: Indices /** Comma-separated list or wildcard expression of settings to retrieve. */ name?: Names - /** If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting `foo*,bar*` returns an error if an index starts with foo but no index starts with `bar`. */ + /** If `false`, the request returns an error if any wildcard expression, index + * alias, or `_all` value targets only missing or closed indices. This + * behavior applies even if the request targets other open indices. For + * example, a request targeting `foo*,bar*` returns an error if an index + * starts with foo but no index starts with `bar`. */ allow_no_indices?: boolean - /** Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such as `open,hidden`. */ + /** Type of index that wildcard patterns can match. + * If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. + * Supports comma-separated values, such as `open,hidden`. */ expand_wildcards?: ExpandWildcards /** If `true`, returns settings in flat format. */ flat_settings?: boolean @@ -14034,9 +19901,12 @@ export interface IndicesGetSettingsRequest extends RequestBase { ignore_unavailable?: boolean /** If `true`, return all default settings in the response. */ include_defaults?: boolean - /** If `true`, the request retrieves information from the local node only. If `false`, information is retrieved from the master node. */ + /** If `true`, the request retrieves information from the local node only. If + * `false`, information is retrieved from the master node. */ local?: boolean - /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. 
*/ + /** Period to wait for a connection to the master node. If no response is + * received before the timeout expires, the request fails and returns an + * error. */ master_timeout?: Duration /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { index?: never, name?: never, allow_no_indices?: never, expand_wildcards?: never, flat_settings?: never, ignore_unavailable?: never, include_defaults?: never, local?: never, master_timeout?: never } @@ -14047,13 +19917,16 @@ export interface IndicesGetSettingsRequest extends RequestBase { export type IndicesGetSettingsResponse = Record export interface IndicesGetTemplateRequest extends RequestBase { -/** Comma-separated list of index template names used to limit the request. Wildcard (`*`) expressions are supported. To return all index templates, omit this parameter or use a value of `_all` or `*`. */ + /** Comma-separated list of index template names used to limit the request. + * Wildcard (`*`) expressions are supported. + * To return all index templates, omit this parameter or use a value of `_all` or `*`. */ name?: Names /** If `true`, returns settings in flat format. */ flat_settings?: boolean /** If `true`, the request retrieves information from the local node only. */ local?: boolean - /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ + /** Period to wait for a connection to the master node. + * If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { name?: never, flat_settings?: never, local?: never, master_timeout?: never } @@ -14064,7 +19937,9 @@ export interface IndicesGetTemplateRequest extends RequestBase { export type IndicesGetTemplateResponse = Record export interface IndicesMigrateReindexMigrateReindex { + /** Reindex mode. Currently only 'upgrade' is supported. */ mode: IndicesMigrateReindexModeEnum + /** The source index or data stream (only data streams are currently supported). */ source: IndicesMigrateReindexSourceIndex } @@ -14085,7 +19960,7 @@ export interface IndicesMigrateReindexSourceIndex { } export interface IndicesMigrateToDataStreamRequest extends RequestBase { -/** Name of the index alias to convert to a data stream. */ + /** Name of the index alias to convert to a data stream. */ name: IndexName /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration @@ -14100,17 +19975,26 @@ export interface IndicesMigrateToDataStreamRequest extends RequestBase { export type IndicesMigrateToDataStreamResponse = AcknowledgedResponseBase export interface IndicesModifyDataStreamAction { + /** Adds an existing index as a backing index for a data stream. + * The index is hidden as part of this operation. + * WARNING: Adding indices with the `add_backing_index` action can potentially result in improper data stream behavior. + * This should be considered an expert level API. */ add_backing_index?: IndicesModifyDataStreamIndexAndDataStreamAction + /** Removes a backing index from a data stream. + * The index is unhidden as part of this operation. + * A data stream’s write index cannot be removed. 
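A couple of quick sketches for the APIs above: reading settings back, and converting an index alias into a data stream. Names are hypothetical.

const settings = await client.indices.getSettings({
  index: 'my-index-000001',
  flat_settings: true,    // e.g. "index.number_of_replicas" instead of nested objects
  include_defaults: true
})
console.log(settings['my-index-000001']?.settings)

// Convert an existing write alias into a data stream.
await client.indices.migrateToDataStream({ name: 'my-time-series-alias' })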
*/ remove_backing_index?: IndicesModifyDataStreamIndexAndDataStreamAction } export interface IndicesModifyDataStreamIndexAndDataStreamAction { + /** Data stream targeted by the action. */ data_stream: DataStreamName + /** Index for the action. */ index: IndexName } export interface IndicesModifyDataStreamRequest extends RequestBase { -/** Actions to perform. */ + /** Actions to perform. */ actions: IndicesModifyDataStreamAction[] /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { actions?: never } @@ -14121,19 +20005,30 @@ export interface IndicesModifyDataStreamRequest extends RequestBase { export type IndicesModifyDataStreamResponse = AcknowledgedResponseBase export interface IndicesOpenRequest extends RequestBase { -/** Comma-separated list of data streams, indices, and aliases used to limit the request. Supports wildcards (`*`). By default, you must explicitly name the indices you using to limit the request. To limit a request using `_all`, `*`, or other wildcard expressions, change the `action.destructive_requires_name` setting to false. You can update this setting in the `elasticsearch.yml` file or using the cluster update settings API. */ + /** Comma-separated list of data streams, indices, and aliases used to limit the request. + * Supports wildcards (`*`). + * By default, you must explicitly name the indices you using to limit the request. + * To limit a request using `_all`, `*`, or other wildcard expressions, change the `action.destructive_requires_name` setting to false. + * You can update this setting in the `elasticsearch.yml` file or using the cluster update settings API. */ index: Indices - /** If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. */ + /** If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. + * This behavior applies even if the request targets other open indices. */ allow_no_indices?: boolean - /** Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. */ + /** Type of index that wildcard patterns can match. + * If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. + * Supports comma-separated values, such as `open,hidden`. + * Valid values are: `all`, `open`, `closed`, `hidden`, `none`. */ expand_wildcards?: ExpandWildcards /** If `false`, the request returns an error if it targets a missing or closed index. */ ignore_unavailable?: boolean - /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ + /** Period to wait for a connection to the master node. + * If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration - /** Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ + /** Period to wait for a response. + * If no response is received before the timeout expires, the request fails and returns an error. 
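A sketch of the modify data stream actions above; the stream and backing index names are hypothetical, and as noted `add_backing_index` is an expert-level operation.

await client.indices.modifyDataStream({
  actions: [
    // Swap one backing index for another in a single atomic request.
    { add_backing_index: { data_stream: 'logs-myapp-default', index: 'restored-archive-index' } },
    { remove_backing_index: { data_stream: 'logs-myapp-default', index: '.ds-logs-myapp-default-000001' } }
  ]
})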
*/ timeout?: Duration - /** The number of shard copies that must be active before proceeding with the operation. Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). */ + /** The number of shard copies that must be active before proceeding with the operation. + * Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). */ wait_for_active_shards?: WaitForActiveShards /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { index?: never, allow_no_indices?: never, expand_wildcards?: never, ignore_unavailable?: never, master_timeout?: never, timeout?: never, wait_for_active_shards?: never } @@ -14147,7 +20042,7 @@ export interface IndicesOpenResponse { } export interface IndicesPromoteDataStreamRequest extends RequestBase { -/** The name of the data stream */ + /** The name of the data stream */ name: IndexName /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration @@ -14160,23 +20055,37 @@ export interface IndicesPromoteDataStreamRequest extends RequestBase { export type IndicesPromoteDataStreamResponse = any export interface IndicesPutAliasRequest extends RequestBase { -/** Comma-separated list of data streams or indices to add. Supports wildcards (`*`). Wildcard patterns that match both data streams and indices return an error. */ + /** Comma-separated list of data streams or indices to add. + * Supports wildcards (`*`). + * Wildcard patterns that match both data streams and indices return an error. */ index: Indices - /** Alias to update. If the alias doesn’t exist, the request creates it. Index alias names support date math. */ + /** Alias to update. + * If the alias doesn’t exist, the request creates it. + * Index alias names support date math. */ name: Name - /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ + /** Period to wait for a connection to the master node. + * If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration - /** Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ + /** Period to wait for a response. + * If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration /** Query used to limit documents the alias can access. */ filter?: QueryDslQueryContainer - /** Value used to route indexing operations to a specific shard. If specified, this overwrites the `routing` value for indexing operations. Data stream aliases don’t support this parameter. */ + /** Value used to route indexing operations to a specific shard. + * If specified, this overwrites the `routing` value for indexing operations. + * Data stream aliases don’t support this parameter. */ index_routing?: Routing - /** If `true`, sets the write index or data stream for the alias. If an alias points to multiple indices or data streams and `is_write_index` isn’t set, the alias rejects write requests. If an index alias points to one index and `is_write_index` isn’t set, the index automatically acts as the write index. Data stream aliases don’t automatically set a write data stream, even if the alias points to one data stream. 
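A sketch of reopening a closed index with the request above (hypothetical name):

const opened = await client.indices.open({
  index: 'my-closed-index',
  wait_for_active_shards: '1'
})
console.log(opened.acknowledged, opened.shards_acknowledged)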
*/ + /** If `true`, sets the write index or data stream for the alias. + * If an alias points to multiple indices or data streams and `is_write_index` isn’t set, the alias rejects write requests. + * If an index alias points to one index and `is_write_index` isn’t set, the index automatically acts as the write index. + * Data stream aliases don’t automatically set a write data stream, even if the alias points to one data stream. */ is_write_index?: boolean - /** Value used to route indexing and search operations to a specific shard. Data stream aliases don’t support this parameter. */ + /** Value used to route indexing and search operations to a specific shard. + * Data stream aliases don’t support this parameter. */ routing?: Routing - /** Value used to route search operations to a specific shard. If specified, this overwrites the `routing` value for search operations. Data stream aliases don’t support this parameter. */ + /** Value used to route search operations to a specific shard. + * If specified, this overwrites the `routing` value for search operations. + * Data stream aliases don’t support this parameter. */ search_routing?: Routing /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { index?: never, name?: never, master_timeout?: never, timeout?: never, filter?: never, index_routing?: never, is_write_index?: never, routing?: never, search_routing?: never } @@ -14187,19 +20096,29 @@ export interface IndicesPutAliasRequest extends RequestBase { export type IndicesPutAliasResponse = AcknowledgedResponseBase export interface IndicesPutDataLifecycleRequest extends RequestBase { -/** Comma-separated list of data streams used to limit the request. Supports wildcards (`*`). To target all data streams use `*` or `_all`. */ + /** Comma-separated list of data streams used to limit the request. + * Supports wildcards (`*`). + * To target all data streams use `*` or `_all`. */ name: DataStreamNames - /** Type of data stream that wildcard patterns can match. Supports comma-separated values, such as `open,hidden`. Valid values are: `all`, `hidden`, `open`, `closed`, `none`. */ + /** Type of data stream that wildcard patterns can match. + * Supports comma-separated values, such as `open,hidden`. + * Valid values are: `all`, `hidden`, `open`, `closed`, `none`. */ expand_wildcards?: ExpandWildcards - /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ + /** Period to wait for a connection to the master node. If no response is + * received before the timeout expires, the request fails and returns an + * error. */ master_timeout?: Duration - /** Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ + /** Period to wait for a response. + * If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration - /** If defined, every document added to this data stream will be stored at least for this time frame. Any time after this duration the document could be deleted. When empty, every document in this data stream will be stored indefinitely. */ + /** If defined, every document added to this data stream will be stored at least for this time frame. + * Any time after this duration the document could be deleted. + * When empty, every document in this data stream will be stored indefinitely. 
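A sketch of the alias update above, including a filtered write alias; the names and the filter query are hypothetical.

await client.indices.putAlias({
  index: 'my-index-000001',
  name: 'my-write-alias',
  is_write_index: true,
  filter: { term: { 'tenant.id': 'acme' } },  // limits which documents the alias can access
  routing: '1'
})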
*/ data_retention?: Duration /** The downsampling configuration to execute for the managed backing index after rollover. */ downsampling?: IndicesDataStreamLifecycleDownsampling - /** If defined, it turns data stream lifecycle on/off (`true`/`false`) for this data stream. A data stream lifecycle that's disabled (enabled: `false`) will have no effect on the data stream. */ + /** If defined, it turns data stream lifecycle on/off (`true`/`false`) for this data stream. A data stream lifecycle + * that's disabled (enabled: `false`) will have no effect on the data stream. */ enabled?: boolean /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { name?: never, expand_wildcards?: never, master_timeout?: never, timeout?: never, data_retention?: never, downsampling?: never, enabled?: never } @@ -14210,40 +20129,66 @@ export interface IndicesPutDataLifecycleRequest extends RequestBase { export type IndicesPutDataLifecycleResponse = AcknowledgedResponseBase export interface IndicesPutIndexTemplateIndexTemplateMapping { + /** Aliases to add. + * If the index template includes a `data_stream` object, these are data stream aliases. + * Otherwise, these are index aliases. + * Data stream aliases ignore the `index_routing`, `routing`, and `search_routing` options. */ aliases?: Record + /** Mapping for fields in the index. + * If specified, this mapping can include field names, field data types, and mapping parameters. */ mappings?: MappingTypeMapping + /** Configuration options for the index. */ settings?: IndicesIndexSettings lifecycle?: IndicesDataStreamLifecycle } export interface IndicesPutIndexTemplateRequest extends RequestBase { -/** Index or template name */ + /** Index or template name */ name: Name /** If `true`, this request cannot replace or update existing index templates. */ create?: boolean - /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ + /** Period to wait for a connection to the master node. + * If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration /** User defined reason for creating/updating the index template */ cause?: string /** Name of the index template to create. */ index_patterns?: Indices - /** An ordered list of component template names. Component templates are merged in the order specified, meaning that the last component template specified has the highest precedence. */ + /** An ordered list of component template names. + * Component templates are merged in the order specified, meaning that the last component template specified has the highest precedence. */ composed_of?: Name[] - /** Template to be applied. It may optionally include an `aliases`, `mappings`, or `settings` configuration. */ + /** Template to be applied. + * It may optionally include an `aliases`, `mappings`, or `settings` configuration. */ template?: IndicesPutIndexTemplateIndexTemplateMapping - /** If this object is included, the template is used to create data streams and their backing indices. Supports an empty object. Data streams require a matching index template with a `data_stream` object. */ + /** If this object is included, the template is used to create data streams and their backing indices. + * Supports an empty object. + * Data streams require a matching index template with a `data_stream` object. 
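A sketch of configuring a data stream lifecycle with the request above; the retention value is an arbitrary example.

await client.indices.putDataLifecycle({
  name: 'logs-*',
  data_retention: '7d',  // documents may be deleted any time after seven days
  enabled: true
})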
*/ data_stream?: IndicesDataStreamVisibility - /** Priority to determine index template precedence when a new data stream or index is created. The index template with the highest priority is chosen. If no priority is specified the template is treated as though it is of priority 0 (lowest priority). This number is not automatically generated by Elasticsearch. */ + /** Priority to determine index template precedence when a new data stream or index is created. + * The index template with the highest priority is chosen. + * If no priority is specified the template is treated as though it is of priority 0 (lowest priority). + * This number is not automatically generated by Elasticsearch. */ priority?: long - /** Version number used to manage index templates externally. This number is not automatically generated by Elasticsearch. External systems can use these version numbers to simplify template management. To unset a version, replace the template without specifying one. */ + /** Version number used to manage index templates externally. + * This number is not automatically generated by Elasticsearch. + * External systems can use these version numbers to simplify template management. + * To unset a version, replace the template without specifying one. */ version?: VersionNumber - /** Optional user metadata about the index template. It may have any contents. It is not automatically generated or used by Elasticsearch. This user-defined object is stored in the cluster state, so keeping it short is preferable. To unset the metadata, replace the template without specifying it. */ + /** Optional user metadata about the index template. + * It may have any contents. + * It is not automatically generated or used by Elasticsearch. + * This user-defined object is stored in the cluster state, so keeping it short is preferable. + * To unset the metadata, replace the template without specifying it. */ _meta?: Metadata - /** This setting overrides the value of the `action.auto_create_index` cluster setting. If set to `true` in a template, then indices can be automatically created using that template even if auto-creation of indices is disabled via `actions.auto_create_index`. If set to `false`, then indices or data streams matching the template must always be explicitly created, and may never be automatically created. */ + /** This setting overrides the value of the `action.auto_create_index` cluster setting. + * If set to `true` in a template, then indices can be automatically created using that template even if auto-creation of indices is disabled via `actions.auto_create_index`. + * If set to `false`, then indices or data streams matching the template must always be explicitly created, and may never be automatically created. */ allow_auto_create?: boolean - /** The configuration option ignore_missing_component_templates can be used when an index template references a component template that might not exist */ + /** The configuration option ignore_missing_component_templates can be used when an index template + * references a component template that might not exist */ ignore_missing_component_templates?: string[] - /** Marks this index template as deprecated. When creating or updating a non-deprecated index template that uses deprecated components, Elasticsearch will emit a deprecation warning. */ + /** Marks this index template as deprecated. When creating or updating a non-deprecated index template + * that uses deprecated components, Elasticsearch will emit a deprecation warning.
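// --- Illustrative usage sketch (assumed names; reuses the `client` instance from the earlier sketch) ---
// Creating a data stream index template, roughly matching the IndicesPutIndexTemplateRequest
// fields documented above.
await client.indices.putIndexTemplate({
  name: 'logs-myapp-template',          // hypothetical template name
  index_patterns: ['logs-myapp-*'],
  data_stream: {},                      // an empty object enables data stream creation
  priority: 200,                        // higher priority wins over overlapping templates
  template: {
    settings: { number_of_shards: 1 },
    mappings: { properties: { message: { type: 'text' } } }
  },
  _meta: { description: 'example template', owner: 'platform-team' }
})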
*/ deprecated?: boolean /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { name?: never, create?: never, master_timeout?: never, cause?: never, index_patterns?: never, composed_of?: never, template?: never, data_stream?: never, priority?: never, version?: never, _meta?: never, allow_auto_create?: never, ignore_missing_component_templates?: never, deprecated?: never } @@ -14254,17 +20199,23 @@ export interface IndicesPutIndexTemplateRequest extends RequestBase { export type IndicesPutIndexTemplateResponse = AcknowledgedResponseBase export interface IndicesPutMappingRequest extends RequestBase { -/** A comma-separated list of index names the mapping should be added to (supports wildcards); use `_all` or omit to add the mapping on all indices. */ + /** A comma-separated list of index names the mapping should be added to (supports wildcards); use `_all` or omit to add the mapping on all indices. */ index: Indices - /** If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. */ + /** If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. + * This behavior applies even if the request targets other open indices. */ allow_no_indices?: boolean - /** Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. */ + /** Type of index that wildcard patterns can match. + * If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. + * Supports comma-separated values, such as `open,hidden`. + * Valid values are: `all`, `open`, `closed`, `hidden`, `none`. */ expand_wildcards?: ExpandWildcards /** If `false`, the request returns an error if it targets a missing or closed index. */ ignore_unavailable?: boolean - /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ + /** Period to wait for a connection to the master node. + * If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration - /** Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ + /** Period to wait for a response. + * If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration /** If `true`, the mappings are applied only to the current write index for the target. */ write_index_only?: boolean @@ -14272,17 +20223,25 @@ export interface IndicesPutMappingRequest extends RequestBase { date_detection?: boolean /** Controls whether new fields are added dynamically. */ dynamic?: MappingDynamicMapping - /** If date detection is enabled then new string fields are checked against 'dynamic_date_formats' and if the value matches then a new date field is added instead of string. */ + /** If date detection is enabled then new string fields are checked + * against 'dynamic_date_formats' and if the value matches then + * a new date field is added instead of string. 
*/ dynamic_date_formats?: string[] /** Specify dynamic templates for the mapping. */ - dynamic_templates?: Record | Record[] + dynamic_templates?: Partial>[] /** Control whether field names are enabled for the index. */ _field_names?: MappingFieldNamesField - /** A mapping type can have custom meta data associated with it. These are not used at all by Elasticsearch, but can be used to store application-specific metadata. */ + /** A mapping type can have custom meta data associated with it. These are + * not used at all by Elasticsearch, but can be used to store + * application-specific metadata. */ _meta?: Metadata /** Automatically map strings into numeric data types for all fields. */ numeric_detection?: boolean - /** Mapping for a field. For new fields, this mapping can include: - Field name - Field data type - Mapping parameters */ + /** Mapping for a field. For new fields, this mapping can include: + * + * - Field name + * - Field data type + * - Mapping parameters */ properties?: Record /** Enable making a routing value required on indexed documents. */ _routing?: MappingRoutingField @@ -14299,50 +20258,75 @@ export interface IndicesPutMappingRequest extends RequestBase { export type IndicesPutMappingResponse = IndicesResponseBase export interface IndicesPutSettingsRequest extends RequestBase { -/** Comma-separated list of data streams, indices, and aliases used to limit the request. Supports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`. */ + /** Comma-separated list of data streams, indices, and aliases used to limit + * the request. Supports wildcards (`*`). To target all data streams and + * indices, omit this parameter or use `*` or `_all`. */ index?: Indices - /** If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. */ + /** If `false`, the request returns an error if any wildcard expression, index + * alias, or `_all` value targets only missing or closed indices. This + * behavior applies even if the request targets other open indices. For + * example, a request targeting `foo*,bar*` returns an error if an index + * starts with `foo` but no index starts with `bar`. */ allow_no_indices?: boolean - /** Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such as `open,hidden`. */ + /** Type of index that wildcard patterns can match. If the request can target + * data streams, this argument determines whether wildcard expressions match + * hidden data streams. Supports comma-separated values, such as + * `open,hidden`. */ expand_wildcards?: ExpandWildcards /** If `true`, returns settings in flat format. */ flat_settings?: boolean /** If `true`, returns settings in flat format. */ ignore_unavailable?: boolean - /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ + /** Period to wait for a connection to the master node. If no response is + * received before the timeout expires, the request fails and returns an + * error. */ master_timeout?: Duration /** If `true`, existing index settings remain unchanged. 
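// --- Illustrative usage sketch (assumed index and field names; `client` as above) ---
// Adding a new field and a dynamic template with IndicesPutMappingRequest. Note that
// `dynamic_templates` is now an array of single-key objects.
await client.indices.putMapping({
  index: 'my-index',
  properties: {
    session_id: { type: 'keyword' }
  },
  dynamic_templates: [
    { strings_as_keywords: { match_mapping_type: 'string', mapping: { type: 'keyword' } } }
  ]
})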
*/ preserve_existing?: boolean - /** Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ + /** Whether to close and reopen the index to apply non-dynamic settings. + * If set to `true` the indices to which the settings are being applied + * will be closed temporarily and then reopened in order to apply the changes. */ + reopen?: boolean + /** Period to wait for a response. If no response is received before the + * timeout expires, the request fails and returns an error. */ timeout?: Duration settings?: IndicesIndexSettings /** All values in `body` will be added to the request body. */ - body?: string | { [key: string]: any } & { index?: never, allow_no_indices?: never, expand_wildcards?: never, flat_settings?: never, ignore_unavailable?: never, master_timeout?: never, preserve_existing?: never, timeout?: never, settings?: never } + body?: string | { [key: string]: any } & { index?: never, allow_no_indices?: never, expand_wildcards?: never, flat_settings?: never, ignore_unavailable?: never, master_timeout?: never, preserve_existing?: never, reopen?: never, timeout?: never, settings?: never } /** All values in `querystring` will be added to the request querystring. */ - querystring?: { [key: string]: any } & { index?: never, allow_no_indices?: never, expand_wildcards?: never, flat_settings?: never, ignore_unavailable?: never, master_timeout?: never, preserve_existing?: never, timeout?: never, settings?: never } + querystring?: { [key: string]: any } & { index?: never, allow_no_indices?: never, expand_wildcards?: never, flat_settings?: never, ignore_unavailable?: never, master_timeout?: never, preserve_existing?: never, reopen?: never, timeout?: never, settings?: never } } export type IndicesPutSettingsResponse = AcknowledgedResponseBase export interface IndicesPutTemplateRequest extends RequestBase { -/** The name of the template */ + /** The name of the template */ name: Name /** If true, this request cannot replace or update existing index templates. */ create?: boolean - /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ + /** Period to wait for a connection to the master node. If no response is + * received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration + /** User defined reason for creating/updating the index template */ cause?: string /** Aliases for the index. */ aliases?: Record - /** Array of wildcard expressions used to match the names of indices during creation. */ + /** Array of wildcard expressions used to match the names + * of indices during creation. */ index_patterns?: string | string[] /** Mapping for fields in the index. */ mappings?: MappingTypeMapping - /** Order in which Elasticsearch applies this template if index matches multiple templates. Templates with lower 'order' values are merged first. Templates with higher 'order' values are merged later, overriding templates with lower values. */ + /** Order in which Elasticsearch applies this template if index + * matches multiple templates. + * + * Templates with lower 'order' values are merged first. Templates with higher + * 'order' values are merged later, overriding templates with lower values. */ order?: integer /** Configuration options for the index. */ settings?: IndicesIndexSettings - /** Version number used to manage index templates externally. 
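// --- Illustrative usage sketch (assumed index and analyzer names; `client` as above) ---
// The new `reopen` flag lets a static (non-dynamic) setting, such as a new analyzer, be
// applied by temporarily closing and reopening the target index.
await client.indices.putSettings({
  index: 'my-index',
  reopen: true,
  settings: {
    analysis: {
      analyzer: {
        my_lowercase: { type: 'custom', tokenizer: 'standard', filter: ['lowercase'] }
      }
    }
  }
})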
This number is not automatically generated by Elasticsearch. To unset a version, replace the template without specifying one. */ + /** Version number used to manage index templates externally. This number + * is not automatically generated by Elasticsearch. + * To unset a version, replace the template without specifying one. */ version?: VersionNumber /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { name?: never, create?: never, master_timeout?: never, cause?: never, aliases?: never, index_patterns?: never, mappings?: never, order?: never, settings?: never, version?: never } @@ -14417,7 +20401,9 @@ export interface IndicesRecoveryRecoveryStatus { } export interface IndicesRecoveryRequest extends RequestBase { -/** Comma-separated list of data streams, indices, and aliases used to limit the request. Supports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`. */ + /** Comma-separated list of data streams, indices, and aliases used to limit the request. + * Supports wildcards (`*`). + * To target all data streams and indices, omit this parameter or use `*` or `_all`. */ index?: Indices /** If `true`, the response only includes ongoing shard recoveries. */ active_only?: boolean @@ -14467,11 +20453,17 @@ export interface IndicesRecoveryVerifyIndex { } export interface IndicesRefreshRequest extends RequestBase { -/** Comma-separated list of data streams, indices, and aliases used to limit the request. Supports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`. */ + /** Comma-separated list of data streams, indices, and aliases used to limit the request. + * Supports wildcards (`*`). + * To target all data streams and indices, omit this parameter or use `*` or `_all`. */ index?: Indices - /** If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. */ + /** If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. + * This behavior applies even if the request targets other open indices. */ allow_no_indices?: boolean - /** Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. */ + /** Type of index that wildcard patterns can match. + * If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. + * Supports comma-separated values, such as `open,hidden`. + * Valid values are: `all`, `open`, `closed`, `hidden`, `none`. */ expand_wildcards?: ExpandWildcards /** If `false`, the request returns an error if it targets a missing or closed index. */ ignore_unavailable?: boolean @@ -14495,7 +20487,7 @@ export interface IndicesReloadSearchAnalyzersReloadResult { } export interface IndicesReloadSearchAnalyzersRequest extends RequestBase { -/** A comma-separated list of index names to reload analyzers for */ + /** A comma-separated list of index names to reload analyzers for */ index: Indices /** Whether to ignore if a wildcard indices expression resolves into no concrete indices. 
(This includes `_all` string or when no indices have been specified) */ allow_no_indices?: boolean @@ -14503,26 +20495,52 @@ export interface IndicesReloadSearchAnalyzersRequest extends RequestBase { expand_wildcards?: ExpandWildcards /** Whether specified concrete indices should be ignored when unavailable (missing or closed) */ ignore_unavailable?: boolean + /** Changed resource to reload analyzers from if applicable */ + resource?: string /** All values in `body` will be added to the request body. */ - body?: string | { [key: string]: any } & { index?: never, allow_no_indices?: never, expand_wildcards?: never, ignore_unavailable?: never } + body?: string | { [key: string]: any } & { index?: never, allow_no_indices?: never, expand_wildcards?: never, ignore_unavailable?: never, resource?: never } /** All values in `querystring` will be added to the request querystring. */ - querystring?: { [key: string]: any } & { index?: never, allow_no_indices?: never, expand_wildcards?: never, ignore_unavailable?: never } + querystring?: { [key: string]: any } & { index?: never, allow_no_indices?: never, expand_wildcards?: never, ignore_unavailable?: never, resource?: never } } export type IndicesReloadSearchAnalyzersResponse = IndicesReloadSearchAnalyzersReloadResult export interface IndicesResolveClusterRequest extends RequestBase { -/** A comma-separated list of names or index patterns for the indices, aliases, and data streams to resolve. Resources on remote clusters can be specified using the ``:`` syntax. Index and cluster exclusions (e.g., `-cluster1:*`) are also supported. If no index expression is specified, information about all remote clusters configured on the local cluster is returned without doing any index matching */ + /** A comma-separated list of names or index patterns for the indices, aliases, and data streams to resolve. + * Resources on remote clusters can be specified using the ``:`` syntax. + * Index and cluster exclusions (e.g., `-cluster1:*`) are also supported. + * If no index expression is specified, information about all remote clusters configured on the local cluster + * is returned without doing any index matching */ name?: Names - /** If false, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. NOTE: This option is only supported when specifying an index expression. You will get an error if you specify index options to the `_resolve/cluster` API endpoint that takes no index expression. */ + /** If false, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing + * or closed indices. This behavior applies even if the request targets other open indices. For example, a request + * targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. + * NOTE: This option is only supported when specifying an index expression. You will get an error if you specify index + * options to the `_resolve/cluster` API endpoint that takes no index expression. */ allow_no_indices?: boolean - /** Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such as `open,hidden`. 
Valid values are: `all`, `open`, `closed`, `hidden`, `none`. NOTE: This option is only supported when specifying an index expression. You will get an error if you specify index options to the `_resolve/cluster` API endpoint that takes no index expression. */ + /** Type of index that wildcard patterns can match. + * If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. + * Supports comma-separated values, such as `open,hidden`. + * Valid values are: `all`, `open`, `closed`, `hidden`, `none`. + * NOTE: This option is only supported when specifying an index expression. You will get an error if you specify index + * options to the `_resolve/cluster` API endpoint that takes no index expression. */ expand_wildcards?: ExpandWildcards - /** If true, concrete, expanded, or aliased indices are ignored when frozen. NOTE: This option is only supported when specifying an index expression. You will get an error if you specify index options to the `_resolve/cluster` API endpoint that takes no index expression. */ + /** If true, concrete, expanded, or aliased indices are ignored when frozen. + * NOTE: This option is only supported when specifying an index expression. You will get an error if you specify index + * options to the `_resolve/cluster` API endpoint that takes no index expression. */ ignore_throttled?: boolean - /** If false, the request returns an error if it targets a missing or closed index. NOTE: This option is only supported when specifying an index expression. You will get an error if you specify index options to the `_resolve/cluster` API endpoint that takes no index expression. */ + /** If false, the request returns an error if it targets a missing or closed index. + * NOTE: This option is only supported when specifying an index expression. You will get an error if you specify index + * options to the `_resolve/cluster` API endpoint that takes no index expression. */ ignore_unavailable?: boolean - /** The maximum time to wait for remote clusters to respond. If a remote cluster does not respond within this timeout period, the API response will show the cluster as not connected and include an error message that the request timed out. The default timeout is unset and the query can take as long as the networking layer is configured to wait for remote clusters that are not responding (typically 30 seconds). */ + /** The maximum time to wait for remote clusters to respond. + * If a remote cluster does not respond within this timeout period, the API response + * will show the cluster as not connected and include an error message that the + * request timed out. + * + * The default timeout is unset and the query can take + * as long as the networking layer is configured to wait for remote clusters that are + * not responding (typically 30 seconds). */ timeout?: Duration /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { name?: never, allow_no_indices?: never, expand_wildcards?: never, ignore_throttled?: never, ignore_unavailable?: never, timeout?: never } @@ -14531,23 +20549,36 @@ export interface IndicesResolveClusterRequest extends RequestBase { } export interface IndicesResolveClusterResolveClusterInfo { + /** Whether the remote cluster is connected to the local (querying) cluster. */ connected: boolean + /** The `skip_unavailable` setting for a remote cluster. 
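// --- Illustrative usage sketch (assumed cluster alias and index expression; `client` as above) ---
// IndicesResolveClusterRequest: check which remote clusters are reachable and whether an
// index expression matches anything before running a cross-cluster search.
const info = await client.indices.resolveCluster({
  name: 'my-index-*,cluster_one:my-index-*',
  timeout: '5s'
})
// Each key is a cluster alias ('(local)' for the querying cluster); each value carries
// `connected`, `skip_unavailable`, `matching_indices`, and optional `error`/`version` fields.
console.log(info)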
*/ skip_unavailable: boolean + /** Whether the index expression provided in the request matches any indices, aliases or data streams + * on the cluster. */ matching_indices?: boolean + /** Provides error messages that are likely to occur if you do a search with this index expression + * on the specified cluster (for example, lack of security privileges to query an index). */ error?: string + /** Provides version information about the cluster. */ version?: ElasticsearchVersionMinInfo } export type IndicesResolveClusterResponse = Record export interface IndicesResolveIndexRequest extends RequestBase { -/** Comma-separated name(s) or index pattern(s) of the indices, aliases, and data streams to resolve. Resources on remote clusters can be specified using the ``:`` syntax. */ + /** Comma-separated name(s) or index pattern(s) of the indices, aliases, and data streams to resolve. + * Resources on remote clusters can be specified using the ``:`` syntax. */ name: Names - /** Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. */ + /** Type of index that wildcard patterns can match. + * If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. + * Supports comma-separated values, such as `open,hidden`. + * Valid values are: `all`, `open`, `closed`, `hidden`, `none`. */ expand_wildcards?: ExpandWildcards /** If `false`, the request returns an error if it targets a missing or closed index. */ ignore_unavailable?: boolean - /** If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. */ + /** If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. + * This behavior applies even if the request targets other open indices. + * For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. */ allow_no_indices?: boolean /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { name?: never, expand_wildcards?: never, ignore_unavailable?: never, allow_no_indices?: never } @@ -14580,30 +20611,45 @@ export interface IndicesResolveIndexResponse { } export interface IndicesRolloverRequest extends RequestBase { -/** Name of the data stream or index alias to roll over. */ + /** Name of the data stream or index alias to roll over. */ alias: IndexAlias - /** Name of the index to create. Supports date math. Data streams do not support this parameter. */ + /** Name of the index to create. + * Supports date math. + * Data streams do not support this parameter. */ new_index?: IndexName /** If `true`, checks whether the current index satisfies the specified conditions but does not perform a rollover. */ dry_run?: boolean - /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ + /** Period to wait for a connection to the master node. 
+ * If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration - /** Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ + /** Period to wait for a response. + * If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration - /** The number of shard copies that must be active before proceeding with the operation. Set to all or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). */ + /** The number of shard copies that must be active before proceeding with the operation. + * Set to all or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). */ wait_for_active_shards?: WaitForActiveShards - /** Aliases for the target index. Data streams do not support this parameter. */ + /** If set to true, the rollover action will only mark a data stream to signal that it needs to be rolled over at the next write. + * Only allowed on data streams. */ + lazy?: boolean + /** Aliases for the target index. + * Data streams do not support this parameter. */ aliases?: Record - /** Conditions for the rollover. If specified, Elasticsearch only performs the rollover if the current index satisfies these conditions. If this parameter is not specified, Elasticsearch performs the rollover unconditionally. If conditions are specified, at least one of them must be a `max_*` condition. The index will rollover if any `max_*` condition is satisfied and all `min_*` conditions are satisfied. */ + /** Conditions for the rollover. + * If specified, Elasticsearch only performs the rollover if the current index satisfies these conditions. + * If this parameter is not specified, Elasticsearch performs the rollover unconditionally. + * If conditions are specified, at least one of them must be a `max_*` condition. + * The index will rollover if any `max_*` condition is satisfied and all `min_*` conditions are satisfied. */ conditions?: IndicesRolloverRolloverConditions - /** Mapping for fields in the index. If specified, this mapping can include field names, field data types, and mapping parameters. */ + /** Mapping for fields in the index. + * If specified, this mapping can include field names, field data types, and mapping parameters. */ mappings?: MappingTypeMapping - /** Configuration options for the index. Data streams do not support this parameter. */ + /** Configuration options for the index. + * Data streams do not support this parameter. */ settings?: Record /** All values in `body` will be added to the request body. */ - body?: string | { [key: string]: any } & { alias?: never, new_index?: never, dry_run?: never, master_timeout?: never, timeout?: never, wait_for_active_shards?: never, aliases?: never, conditions?: never, mappings?: never, settings?: never } + body?: string | { [key: string]: any } & { alias?: never, new_index?: never, dry_run?: never, master_timeout?: never, timeout?: never, wait_for_active_shards?: never, lazy?: never, aliases?: never, conditions?: never, mappings?: never, settings?: never } /** All values in `querystring` will be added to the request querystring.
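// --- Illustrative usage sketch (assumed data stream name; `client` as above) ---
// The new `lazy` flag marks a data stream so it rolls over on the next write instead of
// immediately.
await client.indices.rollover({
  alias: 'logs-myapp-default',
  lazy: true
})

// A conditional rollover of the same data stream, using the documented `conditions` field:
await client.indices.rollover({
  alias: 'logs-myapp-default',
  conditions: { max_age: '7d', max_primary_shard_size: '50gb' }
})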
*/ - querystring?: { [key: string]: any } & { alias?: never, new_index?: never, dry_run?: never, master_timeout?: never, timeout?: never, wait_for_active_shards?: never, aliases?: never, conditions?: never, mappings?: never, settings?: never } + querystring?: { [key: string]: any } & { alias?: never, new_index?: never, dry_run?: never, master_timeout?: never, timeout?: never, wait_for_active_shards?: never, lazy?: never, aliases?: never, conditions?: never, mappings?: never, settings?: never } } export interface IndicesRolloverResponse { @@ -14639,11 +20685,17 @@ export interface IndicesSegmentsIndexSegment { } export interface IndicesSegmentsRequest extends RequestBase { -/** Comma-separated list of data streams, indices, and aliases used to limit the request. Supports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`. */ + /** Comma-separated list of data streams, indices, and aliases used to limit the request. + * Supports wildcards (`*`). + * To target all data streams and indices, omit this parameter or use `*` or `_all`. */ index?: Indices - /** If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. */ + /** If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. + * This behavior applies even if the request targets other open indices. */ allow_no_indices?: boolean - /** Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. */ + /** Type of index that wildcard patterns can match. + * If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. + * Supports comma-separated values, such as `open,hidden`. + * Valid values are: `all`, `open`, `closed`, `hidden`, `none`. */ expand_wildcards?: ExpandWildcards /** If `false`, the request returns an error if it targets a missing or closed index. */ ignore_unavailable?: boolean @@ -14688,11 +20740,14 @@ export interface IndicesShardStoresIndicesShardStores { } export interface IndicesShardStoresRequest extends RequestBase { -/** List of data streams, indices, and aliases used to limit the request. */ + /** List of data streams, indices, and aliases used to limit the request. */ index?: Indices - /** If false, the request returns an error if any wildcard expression, index alias, or _all value targets only missing or closed indices. This behavior applies even if the request targets other open indices. */ + /** If false, the request returns an error if any wildcard expression, index alias, or _all + * value targets only missing or closed indices. This behavior applies even if the request + * targets other open indices. */ allow_no_indices?: boolean - /** Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. */ + /** Type of index that wildcard patterns can match. If the request can target data streams, + * this argument determines whether wildcard expressions match hidden data streams. 
*/ expand_wildcards?: ExpandWildcards /** If true, missing or closed indices are not included in the response. */ ignore_unavailable?: boolean @@ -14739,17 +20794,21 @@ export interface IndicesShardStoresShardStoreWrapper { } export interface IndicesShrinkRequest extends RequestBase { -/** Name of the source index to shrink. */ + /** Name of the source index to shrink. */ index: IndexName /** Name of the target index to create. */ target: IndexName - /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ + /** Period to wait for a connection to the master node. + * If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration - /** Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ + /** Period to wait for a response. + * If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration - /** The number of shard copies that must be active before proceeding with the operation. Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). */ + /** The number of shard copies that must be active before proceeding with the operation. + * Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). */ wait_for_active_shards?: WaitForActiveShards - /** The key is the alias name. Index alias names support date math. */ + /** The key is the alias name. + * Index alias names support date math. */ aliases?: Record /** Configuration options for the target index. */ settings?: Record @@ -14766,16 +20825,20 @@ export interface IndicesShrinkResponse { } export interface IndicesSimulateIndexTemplateRequest extends RequestBase { -/** Name of the index to simulate */ + /** Name of the index to simulate */ name: Name + /** Whether the index template we optionally defined in the body should only be dry-run added if new or can also replace an existing one */ + create?: boolean + /** User defined reason for dry-run creating the new template for simulation purposes */ + cause?: string /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration /** If true, returns all relevant default configurations for the index template. */ include_defaults?: boolean /** All values in `body` will be added to the request body. */ - body?: string | { [key: string]: any } & { name?: never, master_timeout?: never, include_defaults?: never } + body?: string | { [key: string]: any } & { name?: never, create?: never, cause?: never, master_timeout?: never, include_defaults?: never } /** All values in `querystring` will be added to the request querystring. */ - querystring?: { [key: string]: any } & { name?: never, master_timeout?: never, include_defaults?: never } + querystring?: { [key: string]: any } & { name?: never, create?: never, cause?: never, master_timeout?: never, include_defaults?: never } } export interface IndicesSimulateIndexTemplateResponse { @@ -14789,38 +20852,55 @@ export interface IndicesSimulateTemplateOverlapping { } export interface IndicesSimulateTemplateRequest extends RequestBase { -/** Name of the index template to simulate. 
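// --- Illustrative usage sketch (assumed index name; `client` as above) ---
// IndicesSimulateIndexTemplateRequest: preview which template configuration would apply to
// a hypothetical index before creating it.
const simulated = await client.indices.simulateIndexTemplate({
  name: 'logs-myapp-000001',   // hypothetical index name to match against templates
  include_defaults: true
  // `create` and `cause` (added in this change) control dry-run behaviour when a candidate
  // template is supplied in the request body.
})
console.log(simulated)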
To test a template configuration before you add it to the cluster, omit this parameter and specify the template configuration in the request body. */ + /** Name of the index template to simulate. To test a template configuration before you add it to the cluster, omit + * this parameter and specify the template configuration in the request body. */ name?: Name /** If true, the template passed in the body is only used if no existing templates match the same index patterns. If false, the simulation uses the template with the highest priority. Note that the template is not permanently added or updated in either case; it is only used for the simulation. */ create?: boolean + /** User defined reason for dry-run creating the new template for simulation purposes */ + cause?: string /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration /** If true, returns all relevant default configurations for the index template. */ include_defaults?: boolean - /** This setting overrides the value of the `action.auto_create_index` cluster setting. If set to `true` in a template, then indices can be automatically created using that template even if auto-creation of indices is disabled via `actions.auto_create_index`. If set to `false`, then indices or data streams matching the template must always be explicitly created, and may never be automatically created. */ + /** This setting overrides the value of the `action.auto_create_index` cluster setting. + * If set to `true` in a template, then indices can be automatically created using that template even if auto-creation of indices is disabled via `actions.auto_create_index`. + * If set to `false`, then indices or data streams matching the template must always be explicitly created, and may never be automatically created. */ allow_auto_create?: boolean /** Array of wildcard (`*`) expressions used to match the names of data streams and indices during creation. */ index_patterns?: Indices - /** An ordered list of component template names. Component templates are merged in the order specified, meaning that the last component template specified has the highest precedence. */ + /** An ordered list of component template names. + * Component templates are merged in the order specified, meaning that the last component template specified has the highest precedence. */ composed_of?: Name[] - /** Template to be applied. It may optionally include an `aliases`, `mappings`, or `settings` configuration. */ + /** Template to be applied. + * It may optionally include an `aliases`, `mappings`, or `settings` configuration. */ template?: IndicesPutIndexTemplateIndexTemplateMapping - /** If this object is included, the template is used to create data streams and their backing indices. Supports an empty object. Data streams require a matching index template with a `data_stream` object. */ + /** If this object is included, the template is used to create data streams and their backing indices. + * Supports an empty object. + * Data streams require a matching index template with a `data_stream` object. */ data_stream?: IndicesDataStreamVisibility - /** Priority to determine index template precedence when a new data stream or index is created. The index template with the highest priority is chosen. If no priority is specified the template is treated as though it is of priority 0 (lowest priority). This number is not automatically generated by Elasticsearch. 
*/ + /** Priority to determine index template precedence when a new data stream or index is created. + * The index template with the highest priority is chosen. + * If no priority is specified the template is treated as though it is of priority 0 (lowest priority). + * This number is not automatically generated by Elasticsearch. */ priority?: long - /** Version number used to manage index templates externally. This number is not automatically generated by Elasticsearch. */ + /** Version number used to manage index templates externally. + * This number is not automatically generated by Elasticsearch. */ version?: VersionNumber - /** Optional user metadata about the index template. May have any contents. This map is not automatically generated by Elasticsearch. */ + /** Optional user metadata about the index template. + * May have any contents. + * This map is not automatically generated by Elasticsearch. */ _meta?: Metadata - /** The configuration option ignore_missing_component_templates can be used when an index template references a component template that might not exist */ + /** The configuration option ignore_missing_component_templates can be used when an index template + * references a component template that might not exist */ ignore_missing_component_templates?: string[] - /** Marks this index template as deprecated. When creating or updating a non-deprecated index template that uses deprecated components, Elasticsearch will emit a deprecation warning. */ + /** Marks this index template as deprecated. When creating or updating a non-deprecated index template + * that uses deprecated components, Elasticsearch will emit a deprecation warning. */ deprecated?: boolean /** All values in `body` will be added to the request body. */ - body?: string | { [key: string]: any } & { name?: never, create?: never, master_timeout?: never, include_defaults?: never, allow_auto_create?: never, index_patterns?: never, composed_of?: never, template?: never, data_stream?: never, priority?: never, version?: never, _meta?: never, ignore_missing_component_templates?: never, deprecated?: never } + body?: string | { [key: string]: any } & { name?: never, create?: never, cause?: never, master_timeout?: never, include_defaults?: never, allow_auto_create?: never, index_patterns?: never, composed_of?: never, template?: never, data_stream?: never, priority?: never, version?: never, _meta?: never, ignore_missing_component_templates?: never, deprecated?: never } /** All values in `querystring` will be added to the request querystring. */ - querystring?: { [key: string]: any } & { name?: never, create?: never, master_timeout?: never, include_defaults?: never, allow_auto_create?: never, index_patterns?: never, composed_of?: never, template?: never, data_stream?: never, priority?: never, version?: never, _meta?: never, ignore_missing_component_templates?: never, deprecated?: never } + querystring?: { [key: string]: any } & { name?: never, create?: never, cause?: never, master_timeout?: never, include_defaults?: never, allow_auto_create?: never, index_patterns?: never, composed_of?: never, template?: never, data_stream?: never, priority?: never, version?: never, _meta?: never, ignore_missing_component_templates?: never, deprecated?: never } } export interface IndicesSimulateTemplateResponse { @@ -14835,15 +20915,18 @@ export interface IndicesSimulateTemplateTemplate { } export interface IndicesSplitRequest extends RequestBase { -/** Name of the source index to split. */ + /** Name of the source index to split. 
*/ index: IndexName /** Name of the target index to create. */ target: IndexName - /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ + /** Period to wait for a connection to the master node. + * If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration - /** Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ + /** Period to wait for a response. + * If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration - /** The number of shard copies that must be active before proceeding with the operation. Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). */ + /** The number of shard copies that must be active before proceeding with the operation. + * Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). */ wait_for_active_shards?: WaitForActiveShards /** Aliases for the resulting index. */ aliases?: Record @@ -14864,22 +20947,39 @@ export interface IndicesSplitResponse { export type IndicesStatsIndexMetadataState = 'open' | 'close' export interface IndicesStatsIndexStats { + /** Contains statistics about completions across all shards assigned to the node. */ completion?: CompletionStats + /** Contains statistics about documents across all primary shards assigned to the node. */ docs?: DocStats + /** Contains statistics about the field data cache across all shards assigned to the node. */ fielddata?: FielddataStats + /** Contains statistics about flush operations for the node. */ flush?: FlushStats + /** Contains statistics about get operations for the node. */ get?: GetStats + /** Contains statistics about indexing operations for the node. */ indexing?: IndexingStats + /** Contains statistics about indices operations for the node. */ indices?: IndicesStatsIndicesStats + /** Contains statistics about merge operations for the node. */ merges?: MergesStats + /** Contains statistics about the query cache across all shards assigned to the node. */ query_cache?: QueryCacheStats + /** Contains statistics about recovery operations for the node. */ recovery?: RecoveryStats + /** Contains statistics about refresh operations for the node. */ refresh?: RefreshStats + /** Contains statistics about the request cache across all shards assigned to the node. */ request_cache?: RequestCacheStats + /** Contains statistics about search operations for the node. */ search?: SearchStats + /** Contains statistics about segments across all shards assigned to the node. */ segments?: SegmentsStats + /** Contains statistics about the size of shards assigned to the node. */ store?: StoreStats + /** Contains statistics about transaction log operations for the node. */ translog?: TranslogStats + /** Contains statistics about index warming operations for the node. */ warmer?: WarmerStats bulk?: BulkStats shard_stats?: IndicesStatsShardsTotalStats @@ -14901,13 +21001,15 @@ export interface IndicesStatsMappingStats { } export interface IndicesStatsRequest extends RequestBase { -/** Limit the information returned the specific metrics. */ + /** Limit the information returned the specific metrics. 
*/ metric?: Metrics /** A comma-separated list of index names; use `_all` or empty string to perform the operation on all indices */ index?: Indices /** Comma-separated list or wildcard expressions of fields to include in fielddata and suggest statistics. */ completion_fields?: Fields - /** Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such as `open,hidden`. */ + /** Type of index that wildcard patterns can match. If the request can target data streams, this argument + * determines whether wildcard expressions match hidden data streams. Supports comma-separated values, + * such as `open,hidden`. */ expand_wildcards?: ExpandWildcards /** Comma-separated list or wildcard expressions of fields to include in fielddata statistics. */ fielddata_fields?: Fields @@ -15029,43 +21131,84 @@ export interface IndicesStatsShardsTotalStats { } export interface IndicesUpdateAliasesAction { + /** Adds a data stream or index to an alias. + * If the alias doesn’t exist, the `add` action creates it. */ add?: IndicesUpdateAliasesAddAction + /** Removes a data stream or index from an alias. */ remove?: IndicesUpdateAliasesRemoveAction + /** Deletes an index. + * You cannot use this action on aliases or data streams. */ remove_index?: IndicesUpdateAliasesRemoveIndexAction } export interface IndicesUpdateAliasesAddAction { + /** Alias for the action. + * Index alias names support date math. */ alias?: IndexAlias + /** Aliases for the action. + * Index alias names support date math. */ aliases?: IndexAlias | IndexAlias[] + /** Query used to limit documents the alias can access. */ filter?: QueryDslQueryContainer + /** Data stream or index for the action. + * Supports wildcards (`*`). */ index?: IndexName + /** Data streams or indices for the action. + * Supports wildcards (`*`). */ indices?: Indices + /** Value used to route indexing operations to a specific shard. + * If specified, this overwrites the `routing` value for indexing operations. + * Data stream aliases don’t support this parameter. */ index_routing?: Routing + /** If `true`, the alias is hidden. */ is_hidden?: boolean + /** If `true`, sets the write index or data stream for the alias. */ is_write_index?: boolean + /** Value used to route indexing and search operations to a specific shard. + * Data stream aliases don’t support this parameter. */ routing?: Routing + /** Value used to route search operations to a specific shard. + * If specified, this overwrites the `routing` value for search operations. + * Data stream aliases don’t support this parameter. */ search_routing?: Routing + /** If `true`, the alias must exist to perform the action. */ must_exist?: boolean } export interface IndicesUpdateAliasesRemoveAction { + /** Alias for the action. + * Index alias names support date math. */ alias?: IndexAlias + /** Aliases for the action. + * Index alias names support date math. */ aliases?: IndexAlias | IndexAlias[] + /** Data stream or index for the action. + * Supports wildcards (`*`). */ index?: IndexName + /** Data streams or indices for the action. + * Supports wildcards (`*`). */ indices?: Indices + /** If `true`, the alias must exist to perform the action. */ must_exist?: boolean } export interface IndicesUpdateAliasesRemoveIndexAction { + /** Data stream or index for the action. + * Supports wildcards (`*`). */ index?: IndexName + /** Data streams or indices for the action. 
+ * Supports wildcards (`*`). */ indices?: Indices + /** If `true`, the alias must exist to perform the action. */ must_exist?: boolean } export interface IndicesUpdateAliasesRequest extends RequestBase { -/** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ + /** Period to wait for a connection to the master node. + * If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration - /** Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ + /** Period to wait for a response. + * If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration /** Actions to perform. */ actions?: IndicesUpdateAliasesAction[] @@ -15085,21 +21228,29 @@ export interface IndicesValidateQueryIndicesValidationExplanation { } export interface IndicesValidateQueryRequest extends RequestBase { -/** Comma-separated list of data streams, indices, and aliases to search. Supports wildcards (`*`). To search all data streams or indices, omit this parameter or use `*` or `_all`. */ + /** Comma-separated list of data streams, indices, and aliases to search. + * Supports wildcards (`*`). + * To search all data streams or indices, omit this parameter or use `*` or `_all`. */ index?: Indices - /** If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. */ + /** If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. + * This behavior applies even if the request targets other open indices. */ allow_no_indices?: boolean /** If `true`, the validation is executed on all shards instead of one random shard per index. */ all_shards?: boolean - /** Analyzer to use for the query string. This parameter can only be used when the `q` query string parameter is specified. */ + /** Analyzer to use for the query string. + * This parameter can only be used when the `q` query string parameter is specified. */ analyzer?: string /** If `true`, wildcard and prefix queries are analyzed. */ analyze_wildcard?: boolean /** The default operator for query string query: `AND` or `OR`. */ default_operator?: QueryDslOperator - /** Field to use as default where no field prefix is given in the query string. This parameter can only be used when the `q` query string parameter is specified. */ + /** Field to use as default where no field prefix is given in the query string. + * This parameter can only be used when the `q` query string parameter is specified. */ df?: string - /** Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. */ + /** Type of index that wildcard patterns can match. + * If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. + * Supports comma-separated values, such as `open,hidden`. + * Valid values are: `all`, `open`, `closed`, `hidden`, `none`. 
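// --- Illustrative usage sketch (assumed index and alias names; `client` as above) ---
// IndicesUpdateAliasesRequest: atomically move an alias from an old index to a new one,
// making the new index the write index.
await client.indices.updateAliases({
  actions: [
    { remove: { index: 'my-index-000001', alias: 'my-alias', must_exist: true } },
    { add: { index: 'my-index-000002', alias: 'my-alias', is_write_index: true } }
  ]
})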
*/ expand_wildcards?: ExpandWildcards /** If `true`, the response returns detailed information if an error has occurred. */ explain?: boolean @@ -15126,10 +21277,365 @@ export interface IndicesValidateQueryResponse { error?: string } +export interface InferenceAdaptiveAllocations { + /** Turn on `adaptive_allocations`. */ + enabled?: boolean + /** The maximum number of allocations to scale to. + * If set, it must be greater than or equal to `min_number_of_allocations`. */ + max_number_of_allocations?: integer + /** The minimum number of allocations to scale to. + * If set, it must be greater than or equal to 0. + * If not defined, the deployment scales to 0. */ + min_number_of_allocations?: integer +} + +export interface InferenceAlibabaCloudServiceSettings { + /** A valid API key for the AlibabaCloud AI Search API. */ + api_key: string + /** The name of the host address used for the inference task. + * You can find the host address in the API keys section of the documentation. */ + host: string + /** This setting helps to minimize the number of rate limit errors returned from AlibabaCloud AI Search. + * By default, the `alibabacloud-ai-search` service sets the number of requests allowed per minute to `1000`. */ + rate_limit?: InferenceRateLimitSetting + /** The name of the model service to use for the inference task. + * The following service IDs are available for the `completion` task: + * + * * `ops-qwen-turbo` + * * `qwen-turbo` + * * `qwen-plus` + * * `qwen-max ÷ qwen-max-longcontext` + * + * The following service ID is available for the `rerank` task: + * + * * `ops-bge-reranker-larger` + * + * The following service ID is available for the `sparse_embedding` task: + * + * * `ops-text-sparse-embedding-001` + * + * The following service IDs are available for the `text_embedding` task: + * + * `ops-text-embedding-001` + * `ops-text-embedding-zh-001` + * `ops-text-embedding-en-001` + * `ops-text-embedding-002` */ + service_id: string + /** The name of the workspace used for the inference task. */ + workspace: string +} + +export type InferenceAlibabaCloudServiceType = 'alibabacloud-ai-search' + +export interface InferenceAlibabaCloudTaskSettings { + /** For a `sparse_embedding` or `text_embedding` task, specify the type of input passed to the model. + * Valid values are: + * + * * `ingest` for storing document embeddings in a vector database. + * * `search` for storing embeddings of search queries run against a vector database to find relevant documents. */ + input_type?: string + /** For a `sparse_embedding` task, it affects whether the token name will be returned in the response. + * It defaults to `false`, which means only the token ID will be returned in the response. */ + return_token?: boolean +} + +export type InferenceAlibabaCloudTaskType = 'completion' | 'rerank' | 'space_embedding' | 'text_embedding' + +export interface InferenceAmazonBedrockServiceSettings { + /** A valid AWS access key that has permissions to use Amazon Bedrock and access to models for inference requests. */ + access_key: string + /** The base model ID or an ARN to a custom model based on a foundational model. + * The base model IDs can be found in the Amazon Bedrock documentation. + * Note that the model ID must be available for the provider chosen and your IAM user must have access to the model. */ + model: string + /** The model provider for your deployment. + * Note that some providers may support only certain task types. 
+ * Supported providers include:
+ *
+ * * `amazontitan` - available for `text_embedding` and `completion` task types
+ * * `anthropic` - available for `completion` task type only
+ * * `ai21labs` - available for `completion` task type only
+ * * `cohere` - available for `text_embedding` and `completion` task types
+ * * `meta` - available for `completion` task type only
+ * * `mistral` - available for `completion` task type only */
+ provider?: string
+ /** The region that your model or ARN is deployed in.
+ * The list of available regions per model can be found in the Amazon Bedrock documentation. */
+ region: string
+ /** This setting helps to minimize the number of rate limit errors returned from Amazon Bedrock.
+ * By default, the `amazonbedrock` service sets the number of requests allowed per minute to 120. */
+ rate_limit?: InferenceRateLimitSetting
+ /** A valid AWS secret key that is paired with the `access_key`.
+ * For information about creating and managing access and secret keys, refer to the AWS documentation. */
+ secret_key: string
+}
+
+export type InferenceAmazonBedrockServiceType = 'amazonbedrock'
+
+export interface InferenceAmazonBedrockTaskSettings {
+ /** For a `completion` task, it sets the maximum number of output tokens to be generated. */
+ max_new_tokens?: integer
+ /** For a `completion` task, it is a number between 0.0 and 1.0 that controls the apparent creativity of the results.
+ * At temperature 0.0 the model is most deterministic, at temperature 1.0 most random.
+ * It should not be used if `top_p` or `top_k` is specified. */
+ temperature?: float
+ /** For a `completion` task, it limits samples to the top-K most likely words, balancing coherence and variability.
+ * It is only available for anthropic, cohere, and mistral providers.
+ * It is an alternative to `temperature`; it should not be used if `temperature` is specified. */
+ top_k?: float
+ /** For a `completion` task, it is a number in the range of 0.0 to 1.0, to eliminate low-probability tokens.
+ * Top-p uses nucleus sampling to select top tokens whose sum of likelihoods does not exceed a certain value, ensuring both variety and coherence.
+ * It is an alternative to `temperature`; it should not be used if `temperature` is specified. */
+ top_p?: float
+}
+
+export type InferenceAmazonBedrockTaskType = 'completion' | 'text_embedding'
+
+export interface InferenceAnthropicServiceSettings {
+ /** A valid API key for the Anthropic API. */
+ api_key: string
+ /** The name of the model to use for the inference task.
+ * Refer to the Anthropic documentation for the list of supported models. */
+ model_id: string
+ /** This setting helps to minimize the number of rate limit errors returned from Anthropic.
+ * By default, the `anthropic` service sets the number of requests allowed per minute to 50. */
+ rate_limit?: InferenceRateLimitSetting
+}
+
+export type InferenceAnthropicServiceType = 'anthropic'
+
+export interface InferenceAnthropicTaskSettings {
+ /** For a `completion` task, it is the maximum number of tokens to generate before stopping. */
+ max_tokens: integer
+ /** For a `completion` task, it is the amount of randomness injected into the response.
+ * For more details about the supported range, refer to Anthropic documentation. */
+ temperature?: float
+ /** For a `completion` task, it specifies to only sample from the top K options for each subsequent token.
+ * It is recommended for advanced use cases only.
+ * You usually only need to use `temperature`.
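+ * For example, `top_k: 40` limits sampling to the 40 most likely tokens at each step (an illustrative value, not a recommended default).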
*/ + top_k?: integer + /** For a `completion` task, it specifies to use Anthropic's nucleus sampling. + * In nucleus sampling, Anthropic computes the cumulative distribution over all the options for each subsequent token in decreasing probability order and cuts it off once it reaches the specified probability. + * You should either alter `temperature` or `top_p`, but not both. + * It is recommended for advanced use cases only. + * You usually only need to use `temperature`. */ + top_p?: float +} + +export type InferenceAnthropicTaskType = 'completion' + +export interface InferenceAzureAiStudioServiceSettings { + /** A valid API key of your Azure AI Studio model deployment. + * This key can be found on the overview page for your deployment in the management section of your Azure AI Studio account. + * + * IMPORTANT: You need to provide the API key only once, during the inference model creation. + * The get inference endpoint API does not retrieve your API key. + * After creating the inference model, you cannot change the associated API key. + * If you want to use a different API key, delete the inference model and recreate it with the same name and the updated API key. */ + api_key: string + /** The type of endpoint that is available for deployment through Azure AI Studio: `token` or `realtime`. + * The `token` endpoint type is for "pay as you go" endpoints that are billed per token. + * The `realtime` endpoint type is for "real-time" endpoints that are billed per hour of usage. */ + endpoint_type: string + /** The target URL of your Azure AI Studio model deployment. + * This can be found on the overview page for your deployment in the management section of your Azure AI Studio account. */ + target: string + /** The model provider for your deployment. + * Note that some providers may support only certain task types. + * Supported providers include: + * + * * `cohere` - available for `text_embedding` and `completion` task types + * * `databricks` - available for `completion` task type only + * * `meta` - available for `completion` task type only + * * `microsoft_phi` - available for `completion` task type only + * * `mistral` - available for `completion` task type only + * * `openai` - available for `text_embedding` and `completion` task types */ + provider: string + /** This setting helps to minimize the number of rate limit errors returned from Azure AI Studio. + * By default, the `azureaistudio` service sets the number of requests allowed per minute to 240. */ + rate_limit?: InferenceRateLimitSetting +} + +export type InferenceAzureAiStudioServiceType = 'azureaistudio' + +export interface InferenceAzureAiStudioTaskSettings { + /** For a `completion` task, instruct the inference process to perform sampling. + * It has no effect unless `temperature` or `top_p` is specified. */ + do_sample?: float + /** For a `completion` task, provide a hint for the maximum number of output tokens to be generated. */ + max_new_tokens?: integer + /** For a `completion` task, control the apparent creativity of generated completions with a sampling temperature. + * It must be a number in the range of 0.0 to 2.0. + * It should not be used if `top_p` is specified. */ + temperature?: float + /** For a `completion` task, make the model consider the results of the tokens with nucleus sampling probability. + * It is an alternative value to `temperature` and must be a number in the range of 0.0 to 2.0. + * It should not be used if `temperature` is specified. 
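+ * For example, a value of `0.9` samples only from the smallest set of tokens whose cumulative probability reaches 90% (an illustrative value, not a recommended default).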
 */
+ top_p?: float
+ /** For a `text_embedding` task, specify the user issuing the request.
+ * This information can be used for abuse detection. */
+ user?: string
+}
+
+export type InferenceAzureAiStudioTaskType = 'completion' | 'text_embedding'
+
+export interface InferenceAzureOpenAIServiceSettings {
+ /** A valid API key for your Azure OpenAI account.
+ * You must specify either `api_key` or `entra_id`.
+ * If you do not provide either or you provide both, you will receive an error when you try to create your model.
+ *
+ * IMPORTANT: You need to provide the API key only once, during the inference model creation.
+ * The get inference endpoint API does not retrieve your API key.
+ * After creating the inference model, you cannot change the associated API key.
+ * If you want to use a different API key, delete the inference model and recreate it with the same name and the updated API key. */
+ api_key?: string
+ /** The Azure API version ID to use.
+ * It is recommended to use the latest supported non-preview version. */
+ api_version: string
+ /** The deployment name of your deployed models.
+ * Your Azure OpenAI deployments can be found through the Azure OpenAI Studio portal that is linked to your subscription. */
+ deployment_id: string
+ /** A valid Microsoft Entra token.
+ * You must specify either `api_key` or `entra_id`.
+ * If you do not provide either or you provide both, you will receive an error when you try to create your model. */
+ entra_id?: string
+ /** This setting helps to minimize the number of rate limit errors returned from Azure.
+ * The `azureopenai` service sets a default number of requests allowed per minute depending on the task type.
+ * For `text_embedding`, it is set to `1440`.
+ * For `completion`, it is set to `120`. */
+ rate_limit?: InferenceRateLimitSetting
+ /** The name of your Azure OpenAI resource.
+ * You can find this from the list of resources in the Azure Portal for your subscription. */
+ resource_name: string
+}
+
+export type InferenceAzureOpenAIServiceType = 'azureopenai'
+
+export interface InferenceAzureOpenAITaskSettings {
+ /** For a `completion` or `text_embedding` task, specify the user issuing the request.
+ * This information can be used for abuse detection. */
+ user?: string
+}
+
+export type InferenceAzureOpenAITaskType = 'completion' | 'text_embedding'
+
+export type InferenceCohereEmbeddingType = 'byte' | 'float' | 'int8'
+
+export type InferenceCohereInputType = 'classification' | 'clustering' | 'ingest' | 'search'
+
+export interface InferenceCohereServiceSettings {
+ /** A valid API key for your Cohere account.
+ * You can find or create your Cohere API keys on the Cohere API key settings page.
+ *
+ * IMPORTANT: You need to provide the API key only once, during the inference model creation.
+ * The get inference endpoint API does not retrieve your API key.
+ * After creating the inference model, you cannot change the associated API key.
+ * If you want to use a different API key, delete the inference model and recreate it with the same name and the updated API key. */
+ api_key: string
+ /** For a `text_embedding` task, the types of embeddings you want to get back.
+ * Use `byte` for signed int8 embeddings (this is a synonym of `int8`).
+ * Use `float` for the default float embeddings.
+ * Use `int8` for signed int8 embeddings. */
+ embedding_type?: InferenceCohereEmbeddingType
+ /** For a `completion`, `rerank`, or `text_embedding` task, the name of the model to use for the inference task.
+ * + * * For the available `completion` models, refer to the [Cohere command docs](https://docs.cohere.com/docs/models#command). + * * For the available `rerank` models, refer to the [Cohere rerank docs](https://docs.cohere.com/reference/rerank-1). + * * For the available `text_embedding` models, refer to [Cohere embed docs](https://docs.cohere.com/reference/embed). + * + * The default value for a text embedding task is `embed-english-v2.0`. */ + model_id?: string + /** This setting helps to minimize the number of rate limit errors returned from Cohere. + * By default, the `cohere` service sets the number of requests allowed per minute to 10000. */ + rate_limit?: InferenceRateLimitSetting + /** The similarity measure. + * If the `embedding_type` is `float`, the default value is `dot_product`. + * If the `embedding_type` is `int8` or `byte`, the default value is `cosine`. */ + similarity?: InferenceCohereSimilarityType +} + +export type InferenceCohereServiceType = 'cohere' + +export type InferenceCohereSimilarityType = 'cosine' | 'dot_product' | 'l2_norm' + +export interface InferenceCohereTaskSettings { + /** For a `text_embedding` task, the type of input passed to the model. + * Valid values are: + * + * * `classification`: Use it for embeddings passed through a text classifier. + * * `clustering`: Use it for the embeddings run through a clustering algorithm. + * * `ingest`: Use it for storing document embeddings in a vector database. + * * `search`: Use it for storing embeddings of search queries run against a vector database to find relevant documents. + * + * IMPORTANT: The `input_type` field is required when using embedding models `v3` and higher. */ + input_type?: InferenceCohereInputType + /** For a `rerank` task, return doc text within the results. */ + return_documents?: boolean + /** For a `rerank` task, the number of most relevant documents to return. + * It defaults to the number of the documents. + * If this inference endpoint is used in a `text_similarity_reranker` retriever query and `top_n` is set, it must be greater than or equal to `rank_window_size` in the query. */ + top_n?: integer + /** For a `text_embedding` task, the method to handle inputs longer than the maximum token length. + * Valid values are: + * + * * `END`: When the input exceeds the maximum input token length, the end of the input is discarded. + * * `NONE`: When the input exceeds the maximum input token length, an error is returned. + * * `START`: When the input exceeds the maximum input token length, the start of the input is discarded. */ + truncate?: InferenceCohereTruncateType +} + +export type InferenceCohereTaskType = 'completion' | 'rerank' | 'text_embedding' + +export type InferenceCohereTruncateType = 'END' | 'NONE' | 'START' + +export interface InferenceCompletionInferenceResult { + completion: InferenceCompletionResult[] +} + export interface InferenceCompletionResult { result: string } +export interface InferenceCompletionTool { + /** The type of tool. */ + type: string + /** The function definition. */ + function: InferenceCompletionToolFunction +} + +export interface InferenceCompletionToolChoice { + /** The type of the tool. */ + type: string + /** The tool choice function. */ + function: InferenceCompletionToolChoiceFunction +} + +export interface InferenceCompletionToolChoiceFunction { + /** The name of the function to call. */ + name: string +} + +export interface InferenceCompletionToolFunction { + /** A description of what the function does. 
+ * This is used by the model to choose when and how to call the function. */
+ description?: string
+ /** The name of the function. */
+ name: string
+ /** The parameters the function accepts. This should be formatted as a JSON object. */
+ parameters?: any
+ /** Whether to enable schema adherence when generating the function call. */
+ strict?: boolean
+}
+
+export type InferenceCompletionToolType = string | InferenceCompletionToolChoice
+
+export interface InferenceContentObject {
+ /** The text content. */
+ text: string
+ /** The type of content. */
+ type: string
+}
+
 export interface InferenceDeleteInferenceEndpointResult extends AcknowledgedResponseBase { pipelines: string[] }
@@ -15138,22 +21644,159 @@ export type InferenceDenseByteVector = byte[]
 export type InferenceDenseVector = float[]
-export interface InferenceInferenceChunkingSettings extends InferenceInferenceEndpoint {
+export interface InferenceElasticsearchServiceSettings {
+ /** Adaptive allocations configuration details.
+ * If `enabled` is true, the number of allocations of the model is set based on the current load the process gets.
+ * When the load is high, a new model allocation is automatically created, respecting the value of `max_number_of_allocations` if it's set.
+ * When the load is low, a model allocation is automatically removed, respecting the value of `min_number_of_allocations` if it's set.
+ * If `enabled` is true, do not set the number of allocations manually. */
+ adaptive_allocations?: InferenceAdaptiveAllocations
+ /** The deployment identifier for a trained model deployment.
+ * When `deployment_id` is used the `model_id` is optional. */
+ deployment_id?: string
+ /** The name of the model to use for the inference task.
+ * It can be the ID of a built-in model (for example, `.multilingual-e5-small` for E5) or a text embedding model that was uploaded by using the Eland client. */
+ model_id: string
+ /** The total number of allocations that are assigned to the model across machine learning nodes.
+ * Increasing this value generally increases the throughput.
+ * If adaptive allocations are enabled, do not set this value because it's automatically set. */
+ num_allocations?: integer
+ /** The number of threads used by each model allocation during inference.
+ * This setting generally increases the speed per inference request.
+ * The inference process is a compute-bound process; `threads_per_allocations` must not exceed the number of available allocated processors per node.
+ * The value must be a power of 2.
+ * The maximum value is 32. */
+ num_threads: integer
+}
+
+export type InferenceElasticsearchServiceType = 'elasticsearch'
+
+export interface InferenceElasticsearchTaskSettings {
+ /** For a `rerank` task, return the document instead of only the index. */
+ return_documents?: boolean
+}
+
+export type InferenceElasticsearchTaskType = 'rerank' | 'sparse_embedding' | 'text_embedding'
+
+export interface InferenceElserServiceSettings {
+ /** Adaptive allocations configuration details.
+ * If `enabled` is true, the number of allocations of the model is set based on the current load the process gets.
+ * When the load is high, a new model allocation is automatically created, respecting the value of `max_number_of_allocations` if it's set.
+ * When the load is low, a model allocation is automatically removed, respecting the value of `min_number_of_allocations` if it's set.
+ * If `enabled` is true, do not set the number of allocations manually.
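+ * For example, `{ "enabled": true, "min_number_of_allocations": 1, "max_number_of_allocations": 4 }` lets the deployment scale between 1 and 4 allocations based on load.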
 */
+ adaptive_allocations?: InferenceAdaptiveAllocations
+ /** The total number of allocations this model is assigned across machine learning nodes.
+ * Increasing this value generally increases the throughput.
+ * If adaptive allocations are enabled, do not set this value because it's automatically set. */
+ num_allocations: integer
+ /** The number of threads used by each model allocation during inference.
+ * Increasing this value generally increases the speed per inference request.
+ * The inference process is a compute-bound process; `threads_per_allocations` must not exceed the number of available allocated processors per node.
+ * The value must be a power of 2.
+ * The maximum value is 32.
+ *
+ * > info
+ * > If you want to optimize your ELSER endpoint for ingest, set the number of threads to 1. If you want to optimize your ELSER endpoint for search, set the number of threads to greater than 1. */
+ num_threads: integer
+}
+
+export type InferenceElserServiceType = 'elser'
+
+export type InferenceElserTaskType = 'sparse_embedding'
+
+export type InferenceGoogleAiServiceType = 'googleaistudio'
+
+export interface InferenceGoogleAiStudioServiceSettings {
+ /** A valid API key of your Google Gemini account. */
+ api_key: string
+ /** The name of the model to use for the inference task.
+ * Refer to the Google documentation for the list of supported models. */
+ model_id: string
+ /** This setting helps to minimize the number of rate limit errors returned from Google AI Studio.
+ * By default, the `googleaistudio` service sets the number of requests allowed per minute to 360. */
+ rate_limit?: InferenceRateLimitSetting
+}
+
+export type InferenceGoogleAiStudioTaskType = 'completion' | 'text_embedding'
+
+export interface InferenceGoogleVertexAIServiceSettings {
+ /** The name of the location to use for the inference task.
+ * Refer to the Google documentation for the list of supported locations. */
+ location: string
+ /** The name of the model to use for the inference task.
+ * Refer to the Google documentation for the list of supported models. */
+ model_id: string
+ /** The name of the project to use for the inference task. */
+ project_id: string
+ /** This setting helps to minimize the number of rate limit errors returned from Google Vertex AI.
+ * By default, the `googlevertexai` service sets the number of requests allowed per minute to 30,000. */
+ rate_limit?: InferenceRateLimitSetting
+ /** A valid service account in JSON format for the Google Vertex AI API. */
+ service_account_json: string
+}
+
+export type InferenceGoogleVertexAIServiceType = 'googlevertexai'
+
+export interface InferenceGoogleVertexAITaskSettings {
+ /** For a `text_embedding` task, truncate inputs longer than the maximum token length automatically. */
+ auto_truncate?: boolean
+ /** For a `rerank` task, the number of the top N documents that should be returned. */
+ top_n?: integer
+}
+
+export type InferenceGoogleVertexAITaskType = 'rerank' | 'text_embedding'
+
+export interface InferenceHuggingFaceServiceSettings {
+ /** A valid access token for your HuggingFace account.
+ * You can create or find your access tokens on the HuggingFace settings page.
+ *
+ * IMPORTANT: You need to provide the API key only once, during the inference model creation.
+ * The get inference endpoint API does not retrieve your API key.
+ * After creating the inference model, you cannot change the associated API key.
+ * If you want to use a different API key, delete the inference model and recreate it with the same name and the updated API key. */
+ api_key: string
+ /** This setting helps to minimize the number of rate limit errors returned from Hugging Face.
+ * By default, the `hugging_face` service sets the number of requests allowed per minute to 3000. */
+ rate_limit?: InferenceRateLimitSetting
+ /** The URL endpoint to use for the requests. */
+ url: string
+}
+
+export type InferenceHuggingFaceServiceType = 'hugging_face'
+
+export type InferenceHuggingFaceTaskType = 'text_embedding'
+
+export interface InferenceInferenceChunkingSettings {
+ /** The maximum size of a chunk in words.
+ * This value cannot be higher than `300` or lower than `20` (for `sentence` strategy) or `10` (for `word` strategy). */
 max_chunk_size?: integer
+ /** The number of overlapping words for chunks.
+ * It is applicable only to a `word` chunking strategy.
+ * This value cannot be higher than half the `max_chunk_size` value. */
 overlap?: integer
+ /** The number of overlapping sentences for chunks.
+ * It is applicable only for a `sentence` chunking strategy.
+ * It can be either `1` or `0`. */
 sentence_overlap?: integer
+ /** The chunking strategy: `sentence` or `word`. */
 strategy?: string }
 export interface InferenceInferenceEndpoint {
+ /** Chunking configuration object */
 chunking_settings?: InferenceInferenceChunkingSettings
+ /** The service type */
 service: string
+ /** Settings specific to the service */
 service_settings: InferenceServiceSettings
+ /** Task settings specific to the service and task type */
 task_settings?: InferenceTaskSettings }
 export interface InferenceInferenceEndpointInfo extends InferenceInferenceEndpoint {
+ /** The inference Id */
 inference_id: string
+ /** The task type */
 task_type: InferenceTaskType }
@@ -15166,14 +21809,167 @@ export interface InferenceInferenceResult { rerank?: InferenceRankedDocument[] }
+export interface InferenceJinaAIServiceSettings {
+ /** A valid API key of your JinaAI account.
+ *
+ * IMPORTANT: You need to provide the API key only once, during the inference model creation.
+ * The get inference endpoint API does not retrieve your API key.
+ * After creating the inference model, you cannot change the associated API key.
+ * If you want to use a different API key, delete the inference model and recreate it with the same name and the updated API key. */
+ api_key: string
+ /** The name of the model to use for the inference task.
+ * For a `rerank` task, it is required.
+ * For a `text_embedding` task, it is optional. */
+ model_id?: string
+ /** This setting helps to minimize the number of rate limit errors returned from JinaAI.
+ * By default, the `jinaai` service sets the number of requests allowed per minute to 2000 for all task types. */
+ rate_limit?: InferenceRateLimitSetting
+ /** For a `text_embedding` task, the similarity measure. One of cosine, dot_product, l2_norm.
+ * The default value varies with the embedding type.
+ * For example, a float embedding type uses a `dot_product` similarity measure by default. */
+ similarity?: InferenceJinaAISimilarityType
+}
+
+export type InferenceJinaAIServiceType = 'jinaai'
+
+export type InferenceJinaAISimilarityType = 'cosine' | 'dot_product' | 'l2_norm'
+
+export interface InferenceJinaAITaskSettings {
+ /** For a `rerank` task, return the doc text within the results. */
+ return_documents?: boolean
+ /** For a `text_embedding` task, the task passed to the model.
+ * Valid values are: + * + * * `classification`: Use it for embeddings passed through a text classifier. + * * `clustering`: Use it for the embeddings run through a clustering algorithm. + * * `ingest`: Use it for storing document embeddings in a vector database. + * * `search`: Use it for storing embeddings of search queries run against a vector database to find relevant documents. */ + task?: InferenceJinaAITextEmbeddingTask + /** For a `rerank` task, the number of most relevant documents to return. + * It defaults to the number of the documents. + * If this inference endpoint is used in a `text_similarity_reranker` retriever query and `top_n` is set, it must be greater than or equal to `rank_window_size` in the query. */ + top_n?: integer +} + +export type InferenceJinaAITaskType = 'rerank' | 'text_embedding' + +export type InferenceJinaAITextEmbeddingTask = 'classification' | 'clustering' | 'ingest' | 'search' + +export interface InferenceMessage { + /** The content of the message. */ + content?: InferenceMessageContent + /** The role of the message author. */ + role: string + /** The tool call that this message is responding to. */ + tool_call_id?: Id + /** The tool calls generated by the model. */ + tool_calls?: InferenceToolCall[] +} + +export type InferenceMessageContent = string | InferenceContentObject[] + +export interface InferenceMistralServiceSettings { + /** A valid API key of your Mistral account. + * You can find your Mistral API keys or you can create a new one on the API Keys page. + * + * IMPORTANT: You need to provide the API key only once, during the inference model creation. + * The get inference endpoint API does not retrieve your API key. + * After creating the inference model, you cannot change the associated API key. + * If you want to use a different API key, delete the inference model and recreate it with the same name and the updated API key. */ + api_key: string + /** The maximum number of tokens per input before chunking occurs. */ + max_input_tokens?: integer + /** The name of the model to use for the inference task. + * Refer to the Mistral models documentation for the list of available text embedding models. */ + model: string + /** This setting helps to minimize the number of rate limit errors returned from the Mistral API. + * By default, the `mistral` service sets the number of requests allowed per minute to 240. */ + rate_limit?: InferenceRateLimitSetting +} + +export type InferenceMistralServiceType = 'mistral' + +export type InferenceMistralTaskType = 'text_embedding' + +export interface InferenceOpenAIServiceSettings { + /** A valid API key of your OpenAI account. + * You can find your OpenAI API keys in your OpenAI account under the API keys section. + * + * IMPORTANT: You need to provide the API key only once, during the inference model creation. + * The get inference endpoint API does not retrieve your API key. + * After creating the inference model, you cannot change the associated API key. + * If you want to use a different API key, delete the inference model and recreate it with the same name and the updated API key. */ + api_key: string + /** The number of dimensions the resulting output embeddings should have. + * It is supported only in `text-embedding-3` and later models. + * If it is not set, the OpenAI defined default for the model is used. */ + dimensions?: integer + /** The name of the model to use for the inference task. + * Refer to the OpenAI documentation for the list of available text embedding models. 
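+ * For example, `text-embedding-3-small` is one commonly used embedding model at the time of writing; check the OpenAI documentation for the current list.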
*/ + model_id: string + /** The unique identifier for your organization. + * You can find the Organization ID in your OpenAI account under *Settings > Organizations*. */ + organization_id?: string + /** This setting helps to minimize the number of rate limit errors returned from OpenAI. + * The `openai` service sets a default number of requests allowed per minute depending on the task type. + * For `text_embedding`, it is set to `3000`. + * For `completion`, it is set to `500`. */ + rate_limit?: InferenceRateLimitSetting + /** The URL endpoint to use for the requests. + * It can be changed for testing purposes. */ + url?: string +} + +export type InferenceOpenAIServiceType = 'openai' + +export interface InferenceOpenAITaskSettings { + /** For a `completion` or `text_embedding` task, specify the user issuing the request. + * This information can be used for abuse detection. */ + user?: string +} + +export type InferenceOpenAITaskType = 'chat_completion' | 'completion' | 'text_embedding' + export interface InferenceRankedDocument { index: integer relevance_score: float text?: string } +export interface InferenceRateLimitSetting { + /** The number of requests allowed per minute. */ + requests_per_minute?: integer +} + +export interface InferenceRequestChatCompletion { + /** A list of objects representing the conversation. */ + messages: InferenceMessage[] + /** The ID of the model to use. */ + model?: string + /** The upper bound limit for the number of tokens that can be generated for a completion request. */ + max_completion_tokens?: long + /** A sequence of strings to control when the model should stop generating additional tokens. */ + stop?: string[] + /** The sampling temperature to use. */ + temperature?: float + /** Controls which tool is called by the model. */ + tool_choice?: InferenceCompletionToolType + /** A list of tools that the model can call. */ + tools?: InferenceCompletionTool[] + /** Nucleus sampling, an alternative to sampling with temperature. */ + top_p?: float +} + +export interface InferenceRerankedInferenceResult { + rerank: InferenceRankedDocument[] +} + export type InferenceServiceSettings = any +export interface InferenceSparseEmbeddingInferenceResult { + sparse_embedding: InferenceSparseEmbeddingResult[] +} + export interface InferenceSparseEmbeddingResult { embedding: InferenceSparseVector } @@ -15182,179 +21978,634 @@ export type InferenceSparseVector = Record export type InferenceTaskSettings = any -export type InferenceTaskType = 'sparse_embedding' | 'text_embedding' | 'rerank' | 'completion' +export type InferenceTaskType = 'sparse_embedding' | 'text_embedding' | 'rerank' | 'completion' | 'chat_completion' export interface InferenceTextEmbeddingByteResult { embedding: InferenceDenseByteVector } -export interface InferenceTextEmbeddingResult { - embedding: InferenceDenseVector -} +export interface InferenceTextEmbeddingInferenceResult { + text_embedding_bytes?: InferenceTextEmbeddingByteResult[] + text_embedding_bits?: InferenceTextEmbeddingByteResult[] + text_embedding?: InferenceTextEmbeddingResult[] +} + +export interface InferenceTextEmbeddingResult { + embedding: InferenceDenseVector +} + +export interface InferenceToolCall { + /** The identifier of the tool call. */ + id: Id + /** The function that the model called. */ + function: InferenceToolCallFunction + /** The type of the tool call. */ + type: string +} + +export interface InferenceToolCallFunction { + /** The arguments to call the function with in JSON format. 
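+ * For example, a call to a hypothetical `get_weather` function might carry the string `{"location": "Berlin"}` here.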
 */
+ arguments: string
+ /** The name of the function to call. */
+ name: string
+}
+
+export interface InferenceVoyageAIServiceSettings {
+ /** The number of dimensions for resulting output embeddings.
+ * This setting maps to `output_dimension` in the VoyageAI documentation.
+ * Only for the `text_embedding` task type. */
+ dimensions?: integer
+ /** The name of the model to use for the inference task.
+ * Refer to the VoyageAI documentation for the list of available text embedding and rerank models. */
+ model_id: string
+ /** This setting helps to minimize the number of rate limit errors returned from VoyageAI.
+ * The `voyageai` service sets a default number of requests allowed per minute depending on the task type.
+ * For both `text_embedding` and `rerank`, it is set to `2000`. */
+ rate_limit?: InferenceRateLimitSetting
+ /** The data type for the embeddings to be returned.
+ * This setting maps to `output_dtype` in the VoyageAI documentation.
+ * Permitted values: float, int8, bit.
+ * `int8` is a synonym of `byte` in the VoyageAI documentation.
+ * `bit` is a synonym of `binary` in the VoyageAI documentation.
+ * Only for the `text_embedding` task type. */
+ embedding_type?: float
+}
+
+export type InferenceVoyageAIServiceType = 'voyageai'
+
+export interface InferenceVoyageAITaskSettings {
+ /** Type of the input text.
+ * Permitted values: `ingest` (maps to `document` in the VoyageAI documentation), `search` (maps to `query` in the VoyageAI documentation).
+ * Only for the `text_embedding` task type. */
+ input_type?: string
+ /** Whether to return the source documents in the response.
+ * Only for the `rerank` task type. */
+ return_documents?: boolean
+ /** The number of most relevant documents to return.
+ * If not specified, the reranking results of all documents will be returned.
+ * Only for the `rerank` task type. */
+ top_k?: integer
+ /** Whether to truncate the input texts to fit within the context length. */
+ truncation?: boolean
+}
+
+export type InferenceVoyageAITaskType = 'text_embedding' | 'rerank'
+
+export interface InferenceWatsonxServiceSettings {
+ /** A valid API key of your Watsonx account.
+ * You can find your Watsonx API keys or you can create a new one on the API keys page.
+ *
+ * IMPORTANT: You need to provide the API key only once, during the inference model creation.
+ * The get inference endpoint API does not retrieve your API key.
+ * After creating the inference model, you cannot change the associated API key.
+ * If you want to use a different API key, delete the inference model and recreate it with the same name and the updated API key. */
+ api_key: string
+ /** A version parameter that takes a version date in the format of `YYYY-MM-DD`.
+ * For the active version date parameters, refer to the Watsonx documentation. */
+ api_version: string
+ /** The name of the model to use for the inference task.
+ * Refer to the IBM Embedding Models section in the Watsonx documentation for the list of available text embedding models. */
+ model_id: string
+ /** The identifier of the IBM Cloud project to use for the inference task. */
+ project_id: string
+ /** This setting helps to minimize the number of rate limit errors returned from Watsonx.
+ * By default, the `watsonxai` service sets the number of requests allowed per minute to 120. */
+ rate_limit?: InferenceRateLimitSetting
+ /** The URL of the inference endpoint that you created on Watsonx.
*/ + url: string +} + +export type InferenceWatsonxServiceType = 'watsonxai' + +export type InferenceWatsonxTaskType = 'text_embedding' + +export interface InferenceChatCompletionUnifiedRequest extends RequestBase { + /** The inference Id */ + inference_id: Id + /** Specifies the amount of time to wait for the inference request to complete. */ + timeout?: Duration + chat_completion_request?: InferenceRequestChatCompletion + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { inference_id?: never, timeout?: never, chat_completion_request?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { inference_id?: never, timeout?: never, chat_completion_request?: never } +} + +export type InferenceChatCompletionUnifiedResponse = StreamResult + +export interface InferenceCompletionRequest extends RequestBase { + /** The inference Id */ + inference_id: Id + /** Specifies the amount of time to wait for the inference request to complete. */ + timeout?: Duration + /** Inference input. + * Either a string or an array of strings. */ + input: string | string[] + /** Optional task settings */ + task_settings?: InferenceTaskSettings + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { inference_id?: never, timeout?: never, input?: never, task_settings?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { inference_id?: never, timeout?: never, input?: never, task_settings?: never } +} + +export type InferenceCompletionResponse = InferenceCompletionInferenceResult + +export interface InferenceDeleteRequest extends RequestBase { + /** The task type */ + task_type?: InferenceTaskType + /** The inference identifier. */ + inference_id: Id + /** When true, the endpoint is not deleted and a list of ingest processors which reference this endpoint is returned. */ + dry_run?: boolean + /** When true, the inference endpoint is forcefully deleted even if it is still being used by ingest processors or semantic text fields. */ + force?: boolean + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { task_type?: never, inference_id?: never, dry_run?: never, force?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { task_type?: never, inference_id?: never, dry_run?: never, force?: never } +} + +export type InferenceDeleteResponse = InferenceDeleteInferenceEndpointResult + +export interface InferenceGetRequest extends RequestBase { + /** The task type */ + task_type?: InferenceTaskType + /** The inference Id */ + inference_id?: Id + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { task_type?: never, inference_id?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { task_type?: never, inference_id?: never } +} + +export interface InferenceGetResponse { + endpoints: InferenceInferenceEndpointInfo[] +} + +export interface InferenceInferenceRequest extends RequestBase { + /** The type of inference task that the model performs. */ + task_type?: InferenceTaskType + /** The unique identifier for the inference endpoint. 
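+ *
+ * A minimal usage sketch, assuming an existing endpoint named `my-embedding-endpoint` (a hypothetical name) and that the client exposes this API as `client.inference.inference`:
+ *
+ * ```ts
+ * const result = await client.inference.inference({
+ *   inference_id: 'my-embedding-endpoint',
+ *   input: 'The quick brown fox jumps over the lazy dog'
+ * })
+ * ```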
*/ + inference_id: Id + /** The amount of time to wait for the inference request to complete. */ + timeout?: Duration + /** The query input, which is required only for the `rerank` task. + * It is not required for other tasks. */ + query?: string + /** The text on which you want to perform the inference task. + * It can be a single string or an array. + * + * > info + * > Inference endpoints for the `completion` task type currently only support a single string as input. */ + input: string | string[] + /** Task settings for the individual inference request. + * These settings are specific to the task type you specified and override the task settings specified when initializing the service. */ + task_settings?: InferenceTaskSettings + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { task_type?: never, inference_id?: never, timeout?: never, query?: never, input?: never, task_settings?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { task_type?: never, inference_id?: never, timeout?: never, query?: never, input?: never, task_settings?: never } +} + +export type InferenceInferenceResponse = InferenceInferenceResult + +export interface InferencePutRequest extends RequestBase { + /** The task type */ + task_type?: InferenceTaskType + /** The inference Id */ + inference_id: Id + inference_config?: InferenceInferenceEndpoint + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { task_type?: never, inference_id?: never, inference_config?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { task_type?: never, inference_id?: never, inference_config?: never } +} + +export type InferencePutResponse = InferenceInferenceEndpointInfo + +export interface InferencePutAlibabacloudRequest extends RequestBase { + /** The type of the inference task that the model will perform. */ + task_type: InferenceAlibabaCloudTaskType + /** The unique identifier of the inference endpoint. */ + alibabacloud_inference_id: Id + /** The chunking configuration object. */ + chunking_settings?: InferenceInferenceChunkingSettings + /** The type of service supported for the specified task type. In this case, `alibabacloud-ai-search`. */ + service: InferenceAlibabaCloudServiceType + /** Settings used to install the inference model. These settings are specific to the `alibabacloud-ai-search` service. */ + service_settings: InferenceAlibabaCloudServiceSettings + /** Settings to configure the inference task. + * These settings are specific to the task type you specified. */ + task_settings?: InferenceAlibabaCloudTaskSettings + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { task_type?: never, alibabacloud_inference_id?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { task_type?: never, alibabacloud_inference_id?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never } +} + +export type InferencePutAlibabacloudResponse = InferenceInferenceEndpointInfo + +export interface InferencePutAmazonbedrockRequest extends RequestBase { + /** The type of the inference task that the model will perform. 
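+ * For example, `text_embedding` or `completion` (the values of `InferenceAmazonBedrockTaskType`).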
 */
+ task_type: InferenceAmazonBedrockTaskType
+ /** The unique identifier of the inference endpoint. */
+ amazonbedrock_inference_id: Id
+ /** The chunking configuration object. */
+ chunking_settings?: InferenceInferenceChunkingSettings
+ /** The type of service supported for the specified task type. In this case, `amazonbedrock`. */
+ service: InferenceAmazonBedrockServiceType
+ /** Settings used to install the inference model. These settings are specific to the `amazonbedrock` service. */
+ service_settings: InferenceAmazonBedrockServiceSettings
+ /** Settings to configure the inference task.
+ * These settings are specific to the task type you specified. */
+ task_settings?: InferenceAmazonBedrockTaskSettings
+ /** All values in `body` will be added to the request body. */
+ body?: string | { [key: string]: any } & { task_type?: never, amazonbedrock_inference_id?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never }
+ /** All values in `querystring` will be added to the request querystring. */
+ querystring?: { [key: string]: any } & { task_type?: never, amazonbedrock_inference_id?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never }
+}
+
+export type InferencePutAmazonbedrockResponse = InferenceInferenceEndpointInfo
+
+export interface InferencePutAnthropicRequest extends RequestBase {
+ /** The task type.
+ * The only valid task type for the model to perform is `completion`. */
+ task_type: InferenceAnthropicTaskType
+ /** The unique identifier of the inference endpoint. */
+ anthropic_inference_id: Id
+ /** The chunking configuration object. */
+ chunking_settings?: InferenceInferenceChunkingSettings
+ /** The type of service supported for the specified task type. In this case, `anthropic`. */
+ service: InferenceAnthropicServiceType
+ /** Settings used to install the inference model. These settings are specific to the `anthropic` service. */
+ service_settings: InferenceAnthropicServiceSettings
+ /** Settings to configure the inference task.
+ * These settings are specific to the task type you specified. */
+ task_settings?: InferenceAnthropicTaskSettings
+ /** All values in `body` will be added to the request body. */
+ body?: string | { [key: string]: any } & { task_type?: never, anthropic_inference_id?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never }
+ /** All values in `querystring` will be added to the request querystring. */
+ querystring?: { [key: string]: any } & { task_type?: never, anthropic_inference_id?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never }
+}
+
+export type InferencePutAnthropicResponse = InferenceInferenceEndpointInfo
+
+export interface InferencePutAzureaistudioRequest extends RequestBase {
+ /** The type of the inference task that the model will perform. */
+ task_type: InferenceAzureAiStudioTaskType
+ /** The unique identifier of the inference endpoint. */
+ azureaistudio_inference_id: Id
+ /** The chunking configuration object. */
+ chunking_settings?: InferenceInferenceChunkingSettings
+ /** The type of service supported for the specified task type. In this case, `azureaistudio`. */
+ service: InferenceAzureAiStudioServiceType
+ /** Settings used to install the inference model. These settings are specific to the `azureaistudio` service. */
+ service_settings: InferenceAzureAiStudioServiceSettings
+ /** Settings to configure the inference task.
+ * These settings are specific to the task type you specified. */ + task_settings?: InferenceAzureAiStudioTaskSettings + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { task_type?: never, azureaistudio_inference_id?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { task_type?: never, azureaistudio_inference_id?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never } +} + +export type InferencePutAzureaistudioResponse = InferenceInferenceEndpointInfo + +export interface InferencePutAzureopenaiRequest extends RequestBase { + /** The type of the inference task that the model will perform. + * NOTE: The `chat_completion` task type only supports streaming and only through the _stream API. */ + task_type: InferenceAzureOpenAITaskType + /** The unique identifier of the inference endpoint. */ + azureopenai_inference_id: Id + /** The chunking configuration object. */ + chunking_settings?: InferenceInferenceChunkingSettings + /** The type of service supported for the specified task type. In this case, `azureopenai`. */ + service: InferenceAzureOpenAIServiceType + /** Settings used to install the inference model. These settings are specific to the `azureopenai` service. */ + service_settings: InferenceAzureOpenAIServiceSettings + /** Settings to configure the inference task. + * These settings are specific to the task type you specified. */ + task_settings?: InferenceAzureOpenAITaskSettings + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { task_type?: never, azureopenai_inference_id?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { task_type?: never, azureopenai_inference_id?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never } +} + +export type InferencePutAzureopenaiResponse = InferenceInferenceEndpointInfo + +export interface InferencePutCohereRequest extends RequestBase { + /** The type of the inference task that the model will perform. */ + task_type: InferenceCohereTaskType + /** The unique identifier of the inference endpoint. */ + cohere_inference_id: Id + /** The chunking configuration object. */ + chunking_settings?: InferenceInferenceChunkingSettings + /** The type of service supported for the specified task type. In this case, `cohere`. */ + service: InferenceCohereServiceType + /** Settings used to install the inference model. + * These settings are specific to the `cohere` service. */ + service_settings: InferenceCohereServiceSettings + /** Settings to configure the inference task. + * These settings are specific to the task type you specified. */ + task_settings?: InferenceCohereTaskSettings + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { task_type?: never, cohere_inference_id?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never } + /** All values in `querystring` will be added to the request querystring. 
 */
+ querystring?: { [key: string]: any } & { task_type?: never, cohere_inference_id?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never }
+}
+
+export type InferencePutCohereResponse = InferenceInferenceEndpointInfo
+
+export interface InferencePutElasticsearchRequest extends RequestBase {
+ /** The type of the inference task that the model will perform. */
+ task_type: InferenceElasticsearchTaskType
+ /** The unique identifier of the inference endpoint.
+ * It must not match the `model_id`. */
+ elasticsearch_inference_id: Id
+ /** The chunking configuration object. */
+ chunking_settings?: InferenceInferenceChunkingSettings
+ /** The type of service supported for the specified task type. In this case, `elasticsearch`. */
+ service: InferenceElasticsearchServiceType
+ /** Settings used to install the inference model. These settings are specific to the `elasticsearch` service. */
+ service_settings: InferenceElasticsearchServiceSettings
+ /** Settings to configure the inference task.
+ * These settings are specific to the task type you specified. */
+ task_settings?: InferenceElasticsearchTaskSettings
+ /** All values in `body` will be added to the request body. */
+ body?: string | { [key: string]: any } & { task_type?: never, elasticsearch_inference_id?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never }
+ /** All values in `querystring` will be added to the request querystring. */
+ querystring?: { [key: string]: any } & { task_type?: never, elasticsearch_inference_id?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never }
+}
+
+export type InferencePutElasticsearchResponse = InferenceInferenceEndpointInfo
+
+export interface InferencePutElserRequest extends RequestBase {
+ /** The type of the inference task that the model will perform. */
+ task_type: InferenceElserTaskType
+ /** The unique identifier of the inference endpoint. */
+ elser_inference_id: Id
+ /** The chunking configuration object. */
+ chunking_settings?: InferenceInferenceChunkingSettings
+ /** The type of service supported for the specified task type. In this case, `elser`. */
+ service: InferenceElserServiceType
+ /** Settings used to install the inference model. These settings are specific to the `elser` service. */
+ service_settings: InferenceElserServiceSettings
+ /** All values in `body` will be added to the request body. */
+ body?: string | { [key: string]: any } & { task_type?: never, elser_inference_id?: never, chunking_settings?: never, service?: never, service_settings?: never }
+ /** All values in `querystring` will be added to the request querystring. */
+ querystring?: { [key: string]: any } & { task_type?: never, elser_inference_id?: never, chunking_settings?: never, service?: never, service_settings?: never }
+}
+
+export type InferencePutElserResponse = InferenceInferenceEndpointInfo
+
+export interface InferencePutGoogleaistudioRequest extends RequestBase {
+ /** The type of the inference task that the model will perform. */
+ task_type: InferenceGoogleAiStudioTaskType
+ /** The unique identifier of the inference endpoint. */
+ googleaistudio_inference_id: Id
+ /** The chunking configuration object. */
+ chunking_settings?: InferenceInferenceChunkingSettings
+ /** The type of service supported for the specified task type. In this case, `googleaistudio`. */
+ service: InferenceGoogleAiServiceType
+ /** Settings used to install the inference model.
These settings are specific to the `googleaistudio` service. */ + service_settings: InferenceGoogleAiStudioServiceSettings + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { task_type?: never, googleaistudio_inference_id?: never, chunking_settings?: never, service?: never, service_settings?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { task_type?: never, googleaistudio_inference_id?: never, chunking_settings?: never, service?: never, service_settings?: never } +} + +export type InferencePutGoogleaistudioResponse = InferenceInferenceEndpointInfo + +export interface InferencePutGooglevertexaiRequest extends RequestBase { + /** The type of the inference task that the model will perform. */ + task_type: InferenceGoogleVertexAITaskType + /** The unique identifier of the inference endpoint. */ + googlevertexai_inference_id: Id + /** The chunking configuration object. */ + chunking_settings?: InferenceInferenceChunkingSettings + /** The type of service supported for the specified task type. In this case, `googlevertexai`. */ + service: InferenceGoogleVertexAIServiceType + /** Settings used to install the inference model. These settings are specific to the `googlevertexai` service. */ + service_settings: InferenceGoogleVertexAIServiceSettings + /** Settings to configure the inference task. + * These settings are specific to the task type you specified. */ + task_settings?: InferenceGoogleVertexAITaskSettings + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { task_type?: never, googlevertexai_inference_id?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { task_type?: never, googlevertexai_inference_id?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never } +} + +export type InferencePutGooglevertexaiResponse = InferenceInferenceEndpointInfo + +export interface InferencePutHuggingFaceRequest extends RequestBase { + /** The type of the inference task that the model will perform. */ + task_type: InferenceHuggingFaceTaskType + /** The unique identifier of the inference endpoint. */ + huggingface_inference_id: Id + /** The chunking configuration object. */ + chunking_settings?: InferenceInferenceChunkingSettings + /** The type of service supported for the specified task type. In this case, `hugging_face`. */ + service: InferenceHuggingFaceServiceType + /** Settings used to install the inference model. These settings are specific to the `hugging_face` service. */ + service_settings: InferenceHuggingFaceServiceSettings + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { task_type?: never, huggingface_inference_id?: never, chunking_settings?: never, service?: never, service_settings?: never } + /** All values in `querystring` will be added to the request querystring. 
*/ + querystring?: { [key: string]: any } & { task_type?: never, huggingface_inference_id?: never, chunking_settings?: never, service?: never, service_settings?: never } +} + +export type InferencePutHuggingFaceResponse = InferenceInferenceEndpointInfo + +export interface InferencePutJinaaiRequest extends RequestBase { + /** The type of the inference task that the model will perform. */ + task_type: InferenceJinaAITaskType + /** The unique identifier of the inference endpoint. */ + jinaai_inference_id: Id + /** The chunking configuration object. */ + chunking_settings?: InferenceInferenceChunkingSettings + /** The type of service supported for the specified task type. In this case, `jinaai`. */ + service: InferenceJinaAIServiceType + /** Settings used to install the inference model. These settings are specific to the `jinaai` service. */ + service_settings: InferenceJinaAIServiceSettings + /** Settings to configure the inference task. + * These settings are specific to the task type you specified. */ + task_settings?: InferenceJinaAITaskSettings + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { task_type?: never, jinaai_inference_id?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { task_type?: never, jinaai_inference_id?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never } +} + +export type InferencePutJinaaiResponse = InferenceInferenceEndpointInfo + +export interface InferencePutMistralRequest extends RequestBase { + /** The task type. + * The only valid task type for the model to perform is `text_embedding`. */ + task_type: InferenceMistralTaskType + /** The unique identifier of the inference endpoint. */ + mistral_inference_id: Id + /** The chunking configuration object. */ + chunking_settings?: InferenceInferenceChunkingSettings + /** The type of service supported for the specified task type. In this case, `mistral`. */ + service: InferenceMistralServiceType + /** Settings used to install the inference model. These settings are specific to the `mistral` service. */ + service_settings: InferenceMistralServiceSettings + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { task_type?: never, mistral_inference_id?: never, chunking_settings?: never, service?: never, service_settings?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { task_type?: never, mistral_inference_id?: never, chunking_settings?: never, service?: never, service_settings?: never } +} + +export type InferencePutMistralResponse = InferenceInferenceEndpointInfo + +export interface InferencePutOpenaiRequest extends RequestBase { + /** The type of the inference task that the model will perform. + * NOTE: The `chat_completion` task type only supports streaming and only through the _stream API. */ + task_type: InferenceOpenAITaskType + /** The unique identifier of the inference endpoint. */ + openai_inference_id: Id + /** The chunking configuration object. */ + chunking_settings?: InferenceInferenceChunkingSettings + /** The type of service supported for the specified task type. In this case, `openai`. */ + service: InferenceOpenAIServiceType + /** Settings used to install the inference model. 
These settings are specific to the `openai` service. */ + service_settings: InferenceOpenAIServiceSettings + /** Settings to configure the inference task. + * These settings are specific to the task type you specified. */ + task_settings?: InferenceOpenAITaskSettings + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { task_type?: never, openai_inference_id?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { task_type?: never, openai_inference_id?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never } +} + +export type InferencePutOpenaiResponse = InferenceInferenceEndpointInfo -export interface InferenceDeleteRequest extends RequestBase { -/** The task type */ - task_type?: InferenceTaskType - /** The inference identifier. */ - inference_id: Id - /** When true, the endpoint is not deleted and a list of ingest processors which reference this endpoint is returned. */ - dry_run?: boolean - /** When true, the inference endpoint is forcefully deleted even if it is still being used by ingest processors or semantic text fields. */ - force?: boolean +export interface InferencePutVoyageaiRequest extends RequestBase { + /** The type of the inference task that the model will perform. */ + task_type: InferenceVoyageAITaskType + /** The unique identifier of the inference endpoint. */ + voyageai_inference_id: Id + /** The chunking configuration object. */ + chunking_settings?: InferenceInferenceChunkingSettings + /** The type of service supported for the specified task type. In this case, `voyageai`. */ + service: InferenceVoyageAIServiceType + /** Settings used to install the inference model. These settings are specific to the `voyageai` service. */ + service_settings: InferenceVoyageAIServiceSettings + /** Settings to configure the inference task. + * These settings are specific to the task type you specified. */ + task_settings?: InferenceVoyageAITaskSettings /** All values in `body` will be added to the request body. */ - body?: string | { [key: string]: any } & { task_type?: never, inference_id?: never, dry_run?: never, force?: never } + body?: string | { [key: string]: any } & { task_type?: never, voyageai_inference_id?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never } /** All values in `querystring` will be added to the request querystring. */ - querystring?: { [key: string]: any } & { task_type?: never, inference_id?: never, dry_run?: never, force?: never } + querystring?: { [key: string]: any } & { task_type?: never, voyageai_inference_id?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never } } -export type InferenceDeleteResponse = InferenceDeleteInferenceEndpointResult +export type InferencePutVoyageaiResponse = InferenceInferenceEndpointInfo -export interface InferenceGetRequest extends RequestBase { -/** The task type */ - task_type?: InferenceTaskType - /** The inference Id */ - inference_id?: Id +export interface InferencePutWatsonxRequest extends RequestBase { + /** The task type. + * The only valid task type for the model to perform is `text_embedding`. */ + task_type: InferenceWatsonxTaskType + /** The unique identifier of the inference endpoint. 
*/ + watsonx_inference_id: Id + /** The type of service supported for the specified task type. In this case, `watsonxai`. */ + service: InferenceWatsonxServiceType + /** Settings used to install the inference model. These settings are specific to the `watsonxai` service. */ + service_settings: InferenceWatsonxServiceSettings /** All values in `body` will be added to the request body. */ - body?: string | { [key: string]: any } & { task_type?: never, inference_id?: never } + body?: string | { [key: string]: any } & { task_type?: never, watsonx_inference_id?: never, service?: never, service_settings?: never } /** All values in `querystring` will be added to the request querystring. */ - querystring?: { [key: string]: any } & { task_type?: never, inference_id?: never } + querystring?: { [key: string]: any } & { task_type?: never, watsonx_inference_id?: never, service?: never, service_settings?: never } } -export interface InferenceGetResponse { - endpoints: InferenceInferenceEndpointInfo[] -} +export type InferencePutWatsonxResponse = InferenceInferenceEndpointInfo -export interface InferenceInferenceRequest extends RequestBase { -/** The type of inference task that the model performs. */ - task_type?: InferenceTaskType +export interface InferenceRerankRequest extends RequestBase { /** The unique identifier for the inference endpoint. */ inference_id: Id /** The amount of time to wait for the inference request to complete. */ timeout?: Duration - /** The query input, which is required only for the `rerank` task. It is not required for other tasks. */ - query?: string - /** The text on which you want to perform the inference task. It can be a single string or an array. > info > Inference endpoints for the `completion` task type currently only support a single string as input. */ + /** Query input. */ + query: string + /** The text on which you want to perform the inference task. + * It can be a single string or an array. + * + * > info + * > Inference endpoints for the `completion` task type currently only support a single string as input. */ input: string | string[] - /** Task settings for the individual inference request. These settings are specific to the task type you specified and override the task settings specified when initializing the service. */ + /** Task settings for the individual inference request. + * These settings are specific to the task type you specified and override the task settings specified when initializing the service. */ task_settings?: InferenceTaskSettings /** All values in `body` will be added to the request body. */ - body?: string | { [key: string]: any } & { task_type?: never, inference_id?: never, timeout?: never, query?: never, input?: never, task_settings?: never } + body?: string | { [key: string]: any } & { inference_id?: never, timeout?: never, query?: never, input?: never, task_settings?: never } /** All values in `querystring` will be added to the request querystring. 
*/ - querystring?: { [key: string]: any } & { task_type?: never, inference_id?: never, timeout?: never, query?: never, input?: never, task_settings?: never } + querystring?: { [key: string]: any } & { inference_id?: never, timeout?: never, query?: never, input?: never, task_settings?: never } } -export type InferenceInferenceResponse = InferenceInferenceResult +export type InferenceRerankResponse = InferenceRerankedInferenceResult -export interface InferencePutRequest extends RequestBase { -/** The task type */ - task_type?: InferenceTaskType +export interface InferenceSparseEmbeddingRequest extends RequestBase { /** The inference Id */ inference_id: Id - inference_config?: InferenceInferenceEndpoint + /** Specifies the amount of time to wait for the inference request to complete. */ + timeout?: Duration + /** Inference input. + * Either a string or an array of strings. */ + input: string | string[] + /** Optional task settings */ + task_settings?: InferenceTaskSettings /** All values in `body` will be added to the request body. */ - body?: string | { [key: string]: any } & { task_type?: never, inference_id?: never, inference_config?: never } + body?: string | { [key: string]: any } & { inference_id?: never, timeout?: never, input?: never, task_settings?: never } /** All values in `querystring` will be added to the request querystring. */ - querystring?: { [key: string]: any } & { task_type?: never, inference_id?: never, inference_config?: never } + querystring?: { [key: string]: any } & { inference_id?: never, timeout?: never, input?: never, task_settings?: never } } -export type InferencePutResponse = InferenceInferenceEndpointInfo +export type InferenceSparseEmbeddingResponse = InferenceSparseEmbeddingInferenceResult -export interface InferenceStreamInferenceRequest extends RequestBase { -/** The unique identifier for the inference endpoint. */ +export interface InferenceStreamCompletionRequest extends RequestBase { + /** The unique identifier for the inference endpoint. */ inference_id: Id - /** The type of task that the model performs. */ - task_type?: InferenceTaskType - /** The text on which you want to perform the inference task. It can be a single string or an array. NOTE: Inference endpoints for the completion task type currently only support a single string as input. */ + /** The text on which you want to perform the inference task. + * It can be a single string or an array. + * + * NOTE: Inference endpoints for the completion task type currently only support a single string as input. */ input: string | string[] + /** Optional task settings */ + task_settings?: InferenceTaskSettings /** All values in `body` will be added to the request body. */ - body?: string | { [key: string]: any } & { inference_id?: never, task_type?: never, input?: never } + body?: string | { [key: string]: any } & { inference_id?: never, input?: never, task_settings?: never } /** All values in `querystring` will be added to the request querystring. 
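// --- Editorial aside (illustrative sketch, not part of the diff) ---
// The InferenceRerankRequest and InferenceSparseEmbeddingRequest shapes introduced
// above can be exercised like this; the endpoint ids and inputs are made-up placeholders.
const rerankRequest: InferenceRerankRequest = {
  inference_id: 'my-rerank-endpoint',          // placeholder endpoint id
  query: 'which passage explains index rollover?',
  input: ['passage one text', 'passage two text'],
  timeout: '30s'
}

const sparseRequest: InferenceSparseEmbeddingRequest = {
  inference_id: 'my-sparse-endpoint',          // placeholder endpoint id
  input: 'Elasticsearch is a distributed search and analytics engine.'
}
// --- end editorial aside ---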
*/ - querystring?: { [key: string]: any } & { inference_id?: never, task_type?: never, input?: never } -} - -export type InferenceStreamInferenceResponse = StreamResult - -export interface InferenceUnifiedInferenceCompletionTool { - type: string - function: InferenceUnifiedInferenceCompletionToolFunction -} - -export interface InferenceUnifiedInferenceCompletionToolChoice { - type: string - function: InferenceUnifiedInferenceCompletionToolChoiceFunction -} - -export interface InferenceUnifiedInferenceCompletionToolChoiceFunction { - name: string -} - -export interface InferenceUnifiedInferenceCompletionToolFunction { - description?: string - name: string - parameters?: any - strict?: boolean -} - -export type InferenceUnifiedInferenceCompletionToolType = string | InferenceUnifiedInferenceCompletionToolChoice - -export interface InferenceUnifiedInferenceContentObject { - text: string - type: string -} - -export interface InferenceUnifiedInferenceMessage { - content?: InferenceUnifiedInferenceMessageContent - role: string - tool_call_id?: Id - tool_calls?: InferenceUnifiedInferenceToolCall[] + querystring?: { [key: string]: any } & { inference_id?: never, input?: never, task_settings?: never } } -export type InferenceUnifiedInferenceMessageContent = string | InferenceUnifiedInferenceContentObject[] +export type InferenceStreamCompletionResponse = StreamResult -export interface InferenceUnifiedInferenceRequest extends RequestBase { -/** The task type */ - task_type?: InferenceTaskType +export interface InferenceTextEmbeddingRequest extends RequestBase { /** The inference Id */ inference_id: Id /** Specifies the amount of time to wait for the inference request to complete. */ timeout?: Duration - /** A list of objects representing the conversation. */ - messages: InferenceUnifiedInferenceMessage[] - /** The ID of the model to use. */ - model?: string - /** The upper bound limit for the number of tokens that can be generated for a completion request. */ - max_completion_tokens?: long - /** A sequence of strings to control when the model should stop generating additional tokens. */ - stop?: string[] - /** The sampling temperature to use. */ - temperature?: float - /** Controls which tool is called by the model. */ - tool_choice?: InferenceUnifiedInferenceCompletionToolType - /** A list of tools that the model can call. */ - tools?: InferenceUnifiedInferenceCompletionTool[] - /** Nucleus sampling, an alternative to sampling with temperature. */ - top_p?: float + /** Inference input. + * Either a string or an array of strings. */ + input: string | string[] + /** Optional task settings */ + task_settings?: InferenceTaskSettings /** All values in `body` will be added to the request body. */ - body?: string | { [key: string]: any } & { task_type?: never, inference_id?: never, timeout?: never, messages?: never, model?: never, max_completion_tokens?: never, stop?: never, temperature?: never, tool_choice?: never, tools?: never, top_p?: never } + body?: string | { [key: string]: any } & { inference_id?: never, timeout?: never, input?: never, task_settings?: never } /** All values in `querystring` will be added to the request querystring. 
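// --- Editorial aside (illustrative sketch, not part of the diff) ---
// InferenceStreamCompletionRequest and InferenceTextEmbeddingRequest follow the same
// pattern: an endpoint id plus the raw input. The values below are placeholders.
const streamCompletionRequest: InferenceStreamCompletionRequest = {
  inference_id: 'my-chat-endpoint',            // placeholder endpoint id
  input: 'Summarize the cluster health output in one sentence.'
}

const textEmbeddingRequest: InferenceTextEmbeddingRequest = {
  inference_id: 'my-embedding-endpoint',       // placeholder endpoint id
  input: ['first sentence to embed', 'second sentence to embed'],
  timeout: '10s'
}
// --- end editorial aside ---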
*/ - querystring?: { [key: string]: any } & { task_type?: never, inference_id?: never, timeout?: never, messages?: never, model?: never, max_completion_tokens?: never, stop?: never, temperature?: never, tool_choice?: never, tools?: never, top_p?: never } -} - -export type InferenceUnifiedInferenceResponse = StreamResult - -export interface InferenceUnifiedInferenceToolCall { - id: Id - function: InferenceUnifiedInferenceToolCallFunction - type: string + querystring?: { [key: string]: any } & { inference_id?: never, timeout?: never, input?: never, task_settings?: never } } -export interface InferenceUnifiedInferenceToolCallFunction { - arguments: string - name: string -} +export type InferenceTextEmbeddingResponse = InferenceTextEmbeddingInferenceResult export interface InferenceUpdateRequest extends RequestBase { -/** The unique identifier of the inference endpoint. */ + /** The unique identifier of the inference endpoint. */ inference_id: Id /** The type of inference task that the model performs. */ task_type?: InferenceTaskType @@ -15368,70 +22619,126 @@ export interface InferenceUpdateRequest extends RequestBase { export type InferenceUpdateResponse = InferenceInferenceEndpointInfo export interface IngestAppendProcessor extends IngestProcessorBase { + /** The field to be appended to. + * Supports template snippets. */ field: Field + /** The value to be appended. Supports template snippets. */ value: any | any[] + /** If `false`, the processor does not append values already present in the field. */ allow_duplicates?: boolean } export interface IngestAttachmentProcessor extends IngestProcessorBase { + /** The field to get the base64 encoded field from. */ field: Field + /** If `true` and field does not exist, the processor quietly exits without modifying the document. */ ignore_missing?: boolean + /** The number of chars being used for extraction to prevent huge fields. + * Use `-1` for no limit. */ indexed_chars?: long + /** Field name from which you can overwrite the number of chars being used for extraction. */ indexed_chars_field?: Field + /** Array of properties to select to be stored. + * Can be `content`, `title`, `name`, `author`, `keywords`, `date`, `content_type`, `content_length`, `language`. */ properties?: string[] + /** The field that will hold the attachment information. */ target_field?: Field + /** If true, the binary field will be removed from the document */ remove_binary?: boolean + /** Field containing the name of the resource to decode. + * If specified, the processor passes this resource name to the underlying Tika library to enable Resource Name Based Detection. */ resource_name?: string } export interface IngestBytesProcessor extends IngestProcessorBase { + /** The field to convert. */ field: Field + /** If `true` and `field` does not exist or is `null`, the processor quietly exits without modifying the document. */ ignore_missing?: boolean + /** The field to assign the converted value to. + * By default, the field is updated in-place. */ target_field?: Field } export interface IngestCircleProcessor extends IngestProcessorBase { + /** The difference between the resulting inscribed distance from center to side and the circle’s radius (measured in meters for `geo_shape`, unit-less for `shape`). */ error_distance: double + /** The field to interpret as a circle. Either a string in WKT format or a map for GeoJSON. */ field: Field + /** If `true` and `field` does not exist, the processor quietly exits without modifying the document. 
*/ ignore_missing?: boolean + /** Which field mapping type is to be used when processing the circle: `geo_shape` or `shape`. */ shape_type: IngestShapeType + /** The field to assign the polygon shape to + * By default, the field is updated in-place. */ target_field?: Field } export interface IngestCommunityIDProcessor extends IngestProcessorBase { + /** Field containing the source IP address. */ source_ip?: Field + /** Field containing the source port. */ source_port?: Field + /** Field containing the destination IP address. */ destination_ip?: Field + /** Field containing the destination port. */ destination_port?: Field + /** Field containing the IANA number. */ iana_number?: Field + /** Field containing the ICMP type. */ icmp_type?: Field + /** Field containing the ICMP code. */ icmp_code?: Field + /** Field containing the transport protocol name or number. Used only when the + * iana_number field is not present. The following protocol names are currently + * supported: eigrp, gre, icmp, icmpv6, igmp, ipv6-icmp, ospf, pim, sctp, tcp, udp */ transport?: Field + /** Output field for the community ID. */ target_field?: Field + /** Seed for the community ID hash. Must be between 0 and 65535 (inclusive). The + * seed can prevent hash collisions between network domains, such as a staging + * and production network that use the same addressing scheme. */ seed?: integer + /** If true and any required fields are missing, the processor quietly exits + * without modifying the document. */ ignore_missing?: boolean } export interface IngestConvertProcessor extends IngestProcessorBase { + /** The field whose value is to be converted. */ field: Field + /** If `true` and `field` does not exist or is `null`, the processor quietly exits without modifying the document. */ ignore_missing?: boolean + /** The field to assign the converted value to. + * By default, the `field` is updated in-place. */ target_field?: Field + /** The type to convert the existing value to. */ type: IngestConvertType } export type IngestConvertType = 'integer' | 'long' | 'double' | 'float' | 'boolean' | 'ip' | 'string' | 'auto' export interface IngestCsvProcessor extends IngestProcessorBase { + /** Value used to fill empty fields. + * Empty fields are skipped if this is not provided. + * An empty field is one with no value (2 consecutive separators) or empty quotes (`""`). */ empty_value?: any + /** The field to extract data from. */ field: Field + /** If `true` and `field` does not exist, the processor quietly exits without modifying the document. */ ignore_missing?: boolean + /** Quote used in CSV, has to be single character string. */ quote?: string + /** Separator used in CSV, has to be single character string. */ separator?: string + /** The array of fields to assign extracted values to. */ target_fields: Fields + /** Trim whitespaces in unquoted fields. */ trim?: boolean } export interface IngestDatabaseConfiguration { + /** The provider-assigned name of the IP geolocation database to download. */ name: Name maxmind?: IngestMaxmind ipinfo?: IngestIpinfo @@ -15440,49 +22747,86 @@ export interface IngestDatabaseConfiguration { export interface IngestDatabaseConfigurationFull { web?: IngestWeb local?: IngestLocal + /** The provider-assigned name of the IP geolocation database to download. 
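// --- Editorial aside (illustrative sketch, not part of the diff) ---
// A standalone IngestConvertProcessor configuration matching the shape documented
// above; the field name refers to a hypothetical document, not a real mapping.
const convertRetries: IngestConvertProcessor = {
  field: 'retries',          // hypothetical source field
  type: 'integer',
  ignore_missing: true,
  description: 'Coerce the retries field from string to integer'
}
// --- end editorial aside ---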
*/ name: Name maxmind?: IngestMaxmind ipinfo?: IngestIpinfo } export interface IngestDateIndexNameProcessor extends IngestProcessorBase { + /** An array of the expected date formats for parsing dates / timestamps in the document being preprocessed. + * Can be a java time pattern or one of the following formats: ISO8601, UNIX, UNIX_MS, or TAI64N. */ date_formats?: string[] + /** How to round the date when formatting the date into the index name. Valid values are: + * `y` (year), `M` (month), `w` (week), `d` (day), `h` (hour), `m` (minute) and `s` (second). + * Supports template snippets. */ date_rounding: string + /** The field to get the date or timestamp from. */ field: Field + /** The format to be used when printing the parsed date into the index name. + * A valid java time pattern is expected here. + * Supports template snippets. */ index_name_format?: string + /** A prefix of the index name to be prepended before the printed date. + * Supports template snippets. */ index_name_prefix?: string + /** The locale to use when parsing the date from the document being preprocessed, relevant when parsing month names or week days. */ locale?: string + /** The timezone to use when parsing the date and when date math index supports resolves expressions into concrete index names. */ timezone?: string } export interface IngestDateProcessor extends IngestProcessorBase { + /** The field to get the date from. */ field: Field + /** An array of the expected date formats. + * Can be a java time pattern or one of the following formats: ISO8601, UNIX, UNIX_MS, or TAI64N. */ formats: string[] + /** The locale to use when parsing the date, relevant when parsing month names or week days. + * Supports template snippets. */ locale?: string + /** The field that will hold the parsed date. */ target_field?: Field + /** The timezone to use when parsing the date. + * Supports template snippets. */ timezone?: string + /** The format to use when writing the date to target_field. Must be a valid + * java time pattern. */ output_format?: string } export interface IngestDissectProcessor extends IngestProcessorBase { + /** The character(s) that separate the appended fields. */ append_separator?: string + /** The field to dissect. */ field: Field + /** If `true` and `field` does not exist or is `null`, the processor quietly exits without modifying the document. */ ignore_missing?: boolean + /** The pattern to apply to the field. */ pattern: string } export interface IngestDocument { + /** Unique identifier for the document. + * This ID must be unique within the `_index`. */ _id?: Id + /** Name of the index containing the document. */ _index?: IndexName + /** JSON body for the document. */ _source: any } export interface IngestDocumentSimulationKeys { + /** Unique identifier for the document. This ID must be unique within the `_index`. */ _id: Id + /** Name of the index containing the document. */ _index: IndexName _ingest: IngestIngest + /** Value used to send the document to a specific primary shard. */ _routing?: string + /** JSON body for the document. */ _source: Record + /** */ _version?: SpecUtilsStringified _version_type?: VersionType } @@ -15490,8 +22834,15 @@ export type IngestDocumentSimulation = IngestDocumentSimulationKeys & { [property: string]: string | Id | IndexName | IngestIngest | Record | SpecUtilsStringified | VersionType } export interface IngestDotExpanderProcessor extends IngestProcessorBase { + /** The field to expand into an object field. + * If set to `*`, all top-level fields will be expanded. 
*/ field: Field + /** Controls the behavior when there is already an existing nested object that conflicts with the expanded field. + * When `false`, the processor will merge conflicts by combining the old and the new values into an array. + * When `true`, the value from the expanded field will overwrite the existing value. */ override?: boolean + /** The field that contains the field to expand. + * Only required if the field to expand is part another object field, because the `field` option can only understand leaf fields. */ path?: string } @@ -15499,44 +22850,81 @@ export interface IngestDropProcessor extends IngestProcessorBase { } export interface IngestEnrichProcessor extends IngestProcessorBase { + /** The field in the input document that matches the policies match_field used to retrieve the enrichment data. + * Supports template snippets. */ field: Field + /** If `true` and `field` does not exist, the processor quietly exits without modifying the document. */ ignore_missing?: boolean + /** The maximum number of matched documents to include under the configured target field. + * The `target_field` will be turned into a json array if `max_matches` is higher than 1, otherwise `target_field` will become a json object. + * In order to avoid documents getting too large, the maximum allowed value is 128. */ max_matches?: integer + /** If processor will update fields with pre-existing non-null-valued field. + * When set to `false`, such fields will not be touched. */ override?: boolean + /** The name of the enrich policy to use. */ policy_name: string + /** A spatial relation operator used to match the geoshape of incoming documents to documents in the enrich index. + * This option is only used for `geo_match` enrich policy types. */ shape_relation?: GeoShapeRelation + /** Field added to incoming documents to contain enrich data. This field contains both the `match_field` and `enrich_fields` specified in the enrich policy. + * Supports template snippets. */ target_field: Field } export interface IngestFailProcessor extends IngestProcessorBase { + /** The error message thrown by the processor. + * Supports template snippets. */ message: string } export type IngestFingerprintDigest = 'MD5' | 'SHA-1' | 'SHA-256' | 'SHA-512' | 'MurmurHash3' export interface IngestFingerprintProcessor extends IngestProcessorBase { + /** Array of fields to include in the fingerprint. For objects, the processor + * hashes both the field key and value. For other fields, the processor hashes + * only the field value. */ fields: Fields + /** Output field for the fingerprint. */ target_field?: Field + /** Salt value for the hash function. */ salt?: string + /** The hash method used to compute the fingerprint. Must be one of MD5, SHA-1, + * SHA-256, SHA-512, or MurmurHash3. */ method?: IngestFingerprintDigest + /** If true, the processor ignores any missing fields. If all fields are + * missing, the processor silently exits without modifying the document. */ ignore_missing?: boolean } export interface IngestForeachProcessor extends IngestProcessorBase { + /** Field containing array or object values. */ field: Field + /** If `true`, the processor silently exits without changing the document if the `field` is `null` or missing. */ ignore_missing?: boolean + /** Ingest processor to run on each element. */ processor: IngestProcessorContainer } export interface IngestGeoGridProcessor extends IngestProcessorBase { + /** The field to interpret as a geo-tile.= + * The field format is determined by the `tile_type`. 
*/ field: string + /** Three tile formats are understood: geohash, geotile and geohex. */ tile_type: IngestGeoGridTileType + /** The field to assign the polygon shape to, by default, the `field` is updated in-place. */ target_field?: Field + /** If specified and a parent tile exists, save that tile address to this field. */ parent_field?: Field + /** If specified and children tiles exist, save those tile addresses to this field as an array of strings. */ children_field?: Field + /** If specified and intersecting non-child tiles exist, save their addresses to this field as an array of strings. */ non_children_field?: Field + /** If specified, save the tile precision (zoom) as an integer to this field. */ precision_field?: Field + /** If `true` and `field` does not exist, the processor quietly exits without modifying the document. */ ignore_missing?: boolean + /** Which format to save the generated polygon in. */ target_format?: IngestGeoGridTargetFormat } @@ -15545,61 +22933,110 @@ export type IngestGeoGridTargetFormat = 'geojson' | 'wkt' export type IngestGeoGridTileType = 'geotile' | 'geohex' | 'geohash' export interface IngestGeoIpProcessor extends IngestProcessorBase { + /** The database filename referring to a database the module ships with (GeoLite2-City.mmdb, GeoLite2-Country.mmdb, or GeoLite2-ASN.mmdb) or a custom database in the ingest-geoip config directory. */ database_file?: string + /** The field to get the ip address from for the geographical lookup. */ field: Field + /** If `true`, only the first found geoip data will be returned, even if the field contains an array. */ first_only?: boolean + /** If `true` and `field` does not exist, the processor quietly exits without modifying the document. */ ignore_missing?: boolean + /** Controls what properties are added to the `target_field` based on the geoip lookup. */ properties?: string[] + /** The field that will hold the geographical information looked up from the MaxMind database. */ target_field?: Field + /** If `true` (and if `ingest.geoip.downloader.eager.download` is `false`), the missing database is downloaded when the pipeline is created. + * Else, the download is triggered by when the pipeline is used as the `default_pipeline` or `final_pipeline` in an index. */ download_database_on_pipeline_creation?: boolean } export interface IngestGrokProcessor extends IngestProcessorBase { + /** Must be disabled or v1. If v1, the processor uses patterns with Elastic + * Common Schema (ECS) field names. */ ecs_compatibility?: string + /** The field to use for grok expression parsing. */ field: Field + /** If `true` and `field` does not exist or is `null`, the processor quietly exits without modifying the document. */ ignore_missing?: boolean + /** A map of pattern-name and pattern tuples defining custom patterns to be used by the current processor. + * Patterns matching existing names will override the pre-existing definition. */ pattern_definitions?: Record + /** An ordered list of grok expression to match and extract named captures with. + * Returns on the first expression in the list that matches. */ patterns: GrokPattern[] + /** When `true`, `_ingest._grok_match_index` will be inserted into your matched document’s metadata with the index into the pattern found in `patterns` that matched. */ trace_match?: boolean } export interface IngestGsubProcessor extends IngestProcessorBase { + /** The field to apply the replacement to. 
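// --- Editorial aside (illustrative sketch, not part of the diff) ---
// An IngestGrokProcessor configuration in the shape documented above. It assumes
// GrokPattern is a string alias; the log format shown is invented for illustration.
const parseAccessLine: IngestGrokProcessor = {
  field: 'message',
  patterns: ['%{IP:client_ip} %{WORD:http_method} %{URIPATHPARAM:request_path}'],
  ignore_missing: true,
  trace_match: false
}
// --- end editorial aside ---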
*/ field: Field + /** If `true` and `field` does not exist or is `null`, the processor quietly exits without modifying the document. */ ignore_missing?: boolean + /** The pattern to be replaced. */ pattern: string + /** The string to replace the matching patterns with. */ replacement: string + /** The field to assign the converted value to + * By default, the `field` is updated in-place. */ target_field?: Field } export interface IngestHtmlStripProcessor extends IngestProcessorBase { + /** The string-valued field to remove HTML tags from. */ field: Field + /** If `true` and `field` does not exist or is `null`, the processor quietly exits without modifying the document, */ ignore_missing?: boolean + /** The field to assign the converted value to + * By default, the `field` is updated in-place. */ target_field?: Field } export interface IngestInferenceConfig { + /** Regression configuration for inference. */ regression?: IngestInferenceConfigRegression + /** Classification configuration for inference. */ classification?: IngestInferenceConfigClassification } export interface IngestInferenceConfigClassification { + /** Specifies the number of top class predictions to return. */ num_top_classes?: integer + /** Specifies the maximum number of feature importance values per document. */ num_top_feature_importance_values?: integer + /** The field that is added to incoming documents to contain the inference prediction. */ results_field?: Field + /** Specifies the field to which the top classes are written. */ top_classes_results_field?: Field + /** Specifies the type of the predicted field to write. + * Valid values are: `string`, `number`, `boolean`. */ prediction_field_type?: string } export interface IngestInferenceConfigRegression { + /** The field that is added to incoming documents to contain the inference prediction. */ results_field?: Field + /** Specifies the maximum number of feature importance values per document. */ num_top_feature_importance_values?: integer } export interface IngestInferenceProcessor extends IngestProcessorBase { + /** The ID or alias for the trained model, or the ID of the deployment. */ model_id: Id + /** Field added to incoming documents to contain results objects. */ target_field?: Field + /** Maps the document field names to the known field names of the model. + * This mapping takes precedence over any default mappings provided in the model configuration. */ field_map?: Record + /** Contains the inference type and its options. */ inference_config?: IngestInferenceConfig + /** Input fields for inference and output (destination) fields for the inference results. + * This option is incompatible with the target_field and field_map options. */ + input_output?: IngestInputConfig | IngestInputConfig[] + /** If true and any of the input fields defined in input_ouput are missing + * then those missing fields are quietly ignored, otherwise a missing field causes a failure. + * Only applies when using input_output configurations to explicitly list the input fields. */ + ignore_missing?: boolean } export interface IngestIngest { @@ -15608,13 +23045,26 @@ export interface IngestIngest { pipeline?: Name } +export interface IngestInputConfig { + input_field: string + output_field: string +} + export interface IngestIpLocationProcessor extends IngestProcessorBase { + /** The database filename referring to a database the module ships with (GeoLite2-City.mmdb, GeoLite2-Country.mmdb, or GeoLite2-ASN.mmdb) or a custom database in the ingest-geoip config directory. 
*/ database_file?: string + /** The field to get the ip address from for the geographical lookup. */ field: Field + /** If `true`, only the first found IP location data will be returned, even if the field contains an array. */ first_only?: boolean + /** If `true` and `field` does not exist, the processor quietly exits without modifying the document. */ ignore_missing?: boolean + /** Controls what properties are added to the `target_field` based on the IP location lookup. */ properties?: string[] + /** The field that will hold the geographical information looked up from the MaxMind database. */ target_field?: Field + /** If `true` (and if `ingest.geoip.downloader.eager.download` is `false`), the missing database is downloaded when the pipeline is created. + * Else, the download is triggered by when the pipeline is used as the `default_pipeline` or `final_pipeline` in an index. */ download_database_on_pipeline_creation?: boolean } @@ -15622,32 +23072,61 @@ export interface IngestIpinfo { } export interface IngestJoinProcessor extends IngestProcessorBase { + /** Field containing array values to join. */ field: Field + /** The separator character. */ separator: string + /** The field to assign the joined value to. + * By default, the field is updated in-place. */ target_field?: Field } export interface IngestJsonProcessor extends IngestProcessorBase { + /** Flag that forces the parsed JSON to be added at the top level of the document. + * `target_field` must not be set when this option is chosen. */ add_to_root?: boolean + /** When set to `replace`, root fields that conflict with fields from the parsed JSON will be overridden. + * When set to `merge`, conflicting fields will be merged. + * Only applicable `if add_to_root` is set to true. */ add_to_root_conflict_strategy?: IngestJsonProcessorConflictStrategy + /** When set to `true`, the JSON parser will not fail if the JSON contains duplicate keys. + * Instead, the last encountered value for any duplicate key wins. */ allow_duplicate_keys?: boolean + /** The field to be parsed. */ field: Field + /** The field that the converted structured object will be written into. + * Any existing content in this field will be overwritten. */ target_field?: Field } export type IngestJsonProcessorConflictStrategy = 'replace' | 'merge' export interface IngestKeyValueProcessor extends IngestProcessorBase { + /** List of keys to exclude from document. */ exclude_keys?: string[] + /** The field to be parsed. + * Supports template snippets. */ field: Field + /** Regex pattern to use for splitting key-value pairs. */ field_split: string + /** If `true` and `field` does not exist or is `null`, the processor quietly exits without modifying the document. */ ignore_missing?: boolean + /** List of keys to filter and insert into document. + * Defaults to including all keys. */ include_keys?: string[] + /** Prefix to be added to extracted keys. */ prefix?: string + /** If `true`. strip brackets `()`, `<>`, `[]` as well as quotes `'` and `"` from extracted values. */ strip_brackets?: boolean + /** The field to insert the extracted keys into. + * Defaults to the root of the document. + * Supports template snippets. */ target_field?: Field + /** String of characters to trim from extracted keys. */ trim_key?: string + /** String of characters to trim from extracted values. */ trim_value?: string + /** Regex pattern to use for splitting the key from the value within a key-value pair. 
*/ value_split: string } @@ -15656,8 +23135,12 @@ export interface IngestLocal { } export interface IngestLowercaseProcessor extends IngestProcessorBase { + /** The field to make lowercase. */ field: Field + /** If `true` and `field` does not exist or is `null`, the processor quietly exits without modifying the document. */ ignore_missing?: boolean + /** The field to assign the converted value to. + * By default, the field is updated in-place. */ target_field?: Field } @@ -15666,31 +23149,57 @@ export interface IngestMaxmind { } export interface IngestNetworkDirectionProcessor extends IngestProcessorBase { + /** Field containing the source IP address. */ source_ip?: Field + /** Field containing the destination IP address. */ destination_ip?: Field + /** Output field for the network direction. */ target_field?: Field + /** List of internal networks. Supports IPv4 and IPv6 addresses and ranges in + * CIDR notation. Also supports the named ranges listed below. These may be + * constructed with template snippets. Must specify only one of + * internal_networks or internal_networks_field. */ internal_networks?: string[] + /** A field on the given document to read the internal_networks configuration + * from. */ internal_networks_field?: Field + /** If true and any required fields are missing, the processor quietly exits + * without modifying the document. */ ignore_missing?: boolean } export interface IngestPipeline { + /** Description of the ingest pipeline. */ description?: string + /** Processors to run immediately after a processor failure. */ on_failure?: IngestProcessorContainer[] + /** Processors used to perform transformations on documents before indexing. + * Processors run sequentially in the order specified. */ processors?: IngestProcessorContainer[] + /** Version number used by external systems to track ingest pipelines. */ version?: VersionNumber + /** Marks this ingest pipeline as deprecated. + * When a deprecated ingest pipeline is referenced as the default or final pipeline when creating or updating a non-deprecated index template, Elasticsearch will emit a deprecation warning. */ deprecated?: boolean + /** Arbitrary metadata about the ingest pipeline. This map is not automatically generated by Elasticsearch. */ _meta?: Metadata } export interface IngestPipelineConfig { + /** Description of the ingest pipeline. */ description?: string + /** Version number used by external systems to track ingest pipelines. */ version?: VersionNumber + /** Processors used to perform transformations on documents before indexing. + * Processors run sequentially in the order specified. */ processors: IngestProcessorContainer[] } export interface IngestPipelineProcessor extends IngestProcessorBase { + /** The name of the pipeline to execute. + * Supports template snippets. */ name: Name + /** Whether to ignore missing pipelines instead of failing. */ ignore_missing_pipeline?: boolean } @@ -15705,118 +23214,275 @@ export interface IngestPipelineSimulation { } export interface IngestProcessorBase { + /** Description of the processor. + * Useful for describing the purpose of the processor or its configuration. */ description?: string - if?: Script | string + /** Conditionally execute the processor. */ + if?: Script | ScriptSource + /** Ignore failures for the processor. */ ignore_failure?: boolean + /** Handle failures for the processor. */ on_failure?: IngestProcessorContainer[] + /** Identifier for the processor. + * Useful for debugging and metrics. 
*/ tag?: string } export interface IngestProcessorContainer { + /** Appends one or more values to an existing array if the field already exists and it is an array. + * Converts a scalar to an array and appends one or more values to it if the field exists and it is a scalar. + * Creates an array containing the provided values if the field doesn’t exist. + * Accepts a single value or an array of values. */ append?: IngestAppendProcessor + /** The attachment processor lets Elasticsearch extract file attachments in common formats (such as PPT, XLS, and PDF) by using the Apache text extraction library Tika. */ attachment?: IngestAttachmentProcessor + /** Converts a human readable byte value (for example `1kb`) to its value in bytes (for example `1024`). + * If the field is an array of strings, all members of the array will be converted. + * Supported human readable units are "b", "kb", "mb", "gb", "tb", "pb" case insensitive. + * An error will occur if the field is not a supported format or resultant value exceeds 2^63. */ bytes?: IngestBytesProcessor + /** Converts circle definitions of shapes to regular polygons which approximate them. */ circle?: IngestCircleProcessor + /** Computes the Community ID for network flow data as defined in the + * Community ID Specification. You can use a community ID to correlate network + * events related to a single flow. */ community_id?: IngestCommunityIDProcessor + /** Converts a field in the currently ingested document to a different type, such as converting a string to an integer. + * If the field value is an array, all members will be converted. */ convert?: IngestConvertProcessor + /** Extracts fields from CSV line out of a single text field within a document. + * Any empty field in CSV will be skipped. */ csv?: IngestCsvProcessor + /** Parses dates from fields, and then uses the date or timestamp as the timestamp for the document. */ date?: IngestDateProcessor + /** The purpose of this processor is to point documents to the right time based index based on a date or timestamp field in a document by using the date math index name support. */ date_index_name?: IngestDateIndexNameProcessor + /** Extracts structured fields out of a single text field by matching the text field against a delimiter-based pattern. */ dissect?: IngestDissectProcessor + /** Expands a field with dots into an object field. + * This processor allows fields with dots in the name to be accessible by other processors in the pipeline. + * Otherwise these fields can’t be accessed by any processor. */ dot_expander?: IngestDotExpanderProcessor + /** Drops the document without raising any errors. + * This is useful to prevent the document from getting indexed based on some condition. */ drop?: IngestDropProcessor + /** The `enrich` processor can enrich documents with data from another index. */ enrich?: IngestEnrichProcessor + /** Raises an exception. + * This is useful for when you expect a pipeline to fail and want to relay a specific message to the requester. */ fail?: IngestFailProcessor + /** Computes a hash of the document’s content. You can use this hash for + * content fingerprinting. */ fingerprint?: IngestFingerprintProcessor + /** Runs an ingest processor on each element of an array or object. */ foreach?: IngestForeachProcessor + /** Currently an undocumented alias for GeoIP Processor. */ ip_location?: IngestIpLocationProcessor + /** Converts geo-grid definitions of grid tiles or cells to regular bounding boxes or polygons which describe their shape. 
+ * This is useful if there is a need to interact with the tile shapes as spatially indexable fields. */ geo_grid?: IngestGeoGridProcessor + /** The `geoip` processor adds information about the geographical location of an IPv4 or IPv6 address. */ geoip?: IngestGeoIpProcessor + /** Extracts structured fields out of a single text field within a document. + * You choose which field to extract matched fields from, as well as the grok pattern you expect will match. + * A grok pattern is like a regular expression that supports aliased expressions that can be reused. */ grok?: IngestGrokProcessor + /** Converts a string field by applying a regular expression and a replacement. + * If the field is an array of string, all members of the array will be converted. + * If any non-string values are encountered, the processor will throw an exception. */ gsub?: IngestGsubProcessor + /** Removes HTML tags from the field. + * If the field is an array of strings, HTML tags will be removed from all members of the array. */ html_strip?: IngestHtmlStripProcessor + /** Uses a pre-trained data frame analytics model or a model deployed for natural language processing tasks to infer against the data that is being ingested in the pipeline. */ inference?: IngestInferenceProcessor + /** Joins each element of an array into a single string using a separator character between each element. + * Throws an error when the field is not an array. */ join?: IngestJoinProcessor + /** Converts a JSON string into a structured JSON object. */ json?: IngestJsonProcessor + /** This processor helps automatically parse messages (or specific event fields) which are of the `foo=bar` variety. */ kv?: IngestKeyValueProcessor + /** Converts a string to its lowercase equivalent. + * If the field is an array of strings, all members of the array will be converted. */ lowercase?: IngestLowercaseProcessor + /** Calculates the network direction given a source IP address, destination IP + * address, and a list of internal networks. */ network_direction?: IngestNetworkDirectionProcessor + /** Executes another pipeline. */ pipeline?: IngestPipelineProcessor + /** The Redact processor uses the Grok rules engine to obscure text in the input document matching the given Grok patterns. + * The processor can be used to obscure Personal Identifying Information (PII) by configuring it to detect known patterns such as email or IP addresses. + * Text that matches a Grok pattern is replaced with a configurable string such as `` where an email address is matched or simply replace all matches with the text `` if preferred. */ redact?: IngestRedactProcessor + /** Extracts the registered domain (also known as the effective top-level + * domain or eTLD), sub-domain, and top-level domain from a fully qualified + * domain name (FQDN). Uses the registered domains defined in the Mozilla + * Public Suffix List. */ registered_domain?: IngestRegisteredDomainProcessor + /** Removes existing fields. + * If one field doesn’t exist, an exception will be thrown. */ remove?: IngestRemoveProcessor + /** Renames an existing field. + * If the field doesn’t exist or the new name is already used, an exception will be thrown. */ rename?: IngestRenameProcessor + /** Routes a document to another target index or data stream. + * When setting the `destination` option, the target is explicitly specified and the dataset and namespace options can’t be set. + * When the `destination` option is not set, this processor is in a data stream mode. 
Note that in this mode, the reroute processor can only be used on data streams that follow the data stream naming scheme. */ reroute?: IngestRerouteProcessor + /** Runs an inline or stored script on incoming documents. + * The script runs in the `ingest` context. */ script?: IngestScriptProcessor + /** Adds a field with the specified value. + * If the field already exists, its value will be replaced with the provided one. */ set?: IngestSetProcessor + /** Sets user-related details (such as `username`, `roles`, `email`, `full_name`, `metadata`, `api_key`, `realm` and `authentication_type`) from the current authenticated user to the current document by pre-processing the ingest. */ set_security_user?: IngestSetSecurityUserProcessor + /** Sorts the elements of an array ascending or descending. + * Homogeneous arrays of numbers will be sorted numerically, while arrays of strings or heterogeneous arrays of strings + numbers will be sorted lexicographically. + * Throws an error when the field is not an array. */ sort?: IngestSortProcessor + /** Splits a field into an array using a separator character. + * Only works on string fields. */ split?: IngestSplitProcessor + /** Terminates the current ingest pipeline, causing no further processors to be run. + * This will normally be executed conditionally, using the `if` option. */ terminate?: IngestTerminateProcessor + /** Trims whitespace from a field. + * If the field is an array of strings, all members of the array will be trimmed. + * This only works on leading and trailing whitespace. */ trim?: IngestTrimProcessor + /** Converts a string to its uppercase equivalent. + * If the field is an array of strings, all members of the array will be converted. */ uppercase?: IngestUppercaseProcessor + /** URL-decodes a string. + * If the field is an array of strings, all members of the array will be decoded. */ urldecode?: IngestUrlDecodeProcessor + /** Parses a Uniform Resource Identifier (URI) string and extracts its components as an object. + * This URI object includes properties for the URI’s domain, path, fragment, port, query, scheme, user info, username, and password. */ uri_parts?: IngestUriPartsProcessor + /** The `user_agent` processor extracts details from the user agent string a browser sends with its web requests. + * This processor adds this information by default under the `user_agent` field. */ user_agent?: IngestUserAgentProcessor } export interface IngestRedact { + /** indicates if document has been redacted */ _is_redacted: boolean } export interface IngestRedactProcessor extends IngestProcessorBase { + /** The field to be redacted */ field: Field + /** A list of grok expressions to match and redact named captures with */ patterns: GrokPattern[] pattern_definitions?: Record + /** Start a redacted section with this token */ prefix?: string + /** End a redacted section with this token */ suffix?: string + /** If `true` and `field` does not exist or is `null`, the processor quietly exits without modifying the document. */ ignore_missing?: boolean + /** If `true` and the current license does not support running redact processors, then the processor quietly exits without modifying the document */ skip_if_unlicensed?: boolean + /** If `true` then ingest metadata `_ingest._redact._is_redacted` is set to `true` if the document has been redacted */ trace_redact?: boolean } export interface IngestRegisteredDomainProcessor extends IngestProcessorBase { + /** Field containing the source FQDN. 
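// --- Editorial aside (illustrative sketch, not part of the diff) ---
// The processor types above plug into IngestProcessorContainer, which in turn
// composes an IngestPipeline. Field names below refer to a hypothetical document.
const normalizeUsersPipeline: IngestPipeline = {
  description: 'Normalize user documents before indexing',
  processors: [
    { set: { field: 'ingested_by', value: 'normalize-users-pipeline' } },
    { lowercase: { field: 'email', ignore_missing: true } }
  ],
  version: 1
}
// --- end editorial aside ---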
*/ field: Field + /** Object field containing extracted domain components. If an empty string, + * the processor adds components to the document’s root. */ target_field?: Field + /** If true and any required fields are missing, the processor quietly exits + * without modifying the document. */ ignore_missing?: boolean } export interface IngestRemoveProcessor extends IngestProcessorBase { + /** Fields to be removed. Supports template snippets. */ field: Fields + /** Fields to be kept. When set, all fields other than those specified are removed. */ keep?: Fields + /** If `true` and `field` does not exist or is `null`, the processor quietly exits without modifying the document. */ ignore_missing?: boolean } export interface IngestRenameProcessor extends IngestProcessorBase { + /** The field to be renamed. + * Supports template snippets. */ field: Field + /** If `true` and `field` does not exist, the processor quietly exits without modifying the document. */ ignore_missing?: boolean + /** The new name of the field. + * Supports template snippets. */ target_field: Field } export interface IngestRerouteProcessor extends IngestProcessorBase { + /** A static value for the target. Can’t be set when the dataset or namespace option is set. */ destination?: string + /** Field references or a static value for the dataset part of the data stream name. + * In addition to the criteria for index names, cannot contain - and must be no longer than 100 characters. + * Example values are nginx.access and nginx.error. + * + * Supports field references with a mustache-like syntax (denoted as {{double}} or {{{triple}}} curly braces). + * When resolving field references, the processor replaces invalid characters with _. Uses the part + * of the index name as a fallback if all field references resolve to a null, missing, or non-string value. + * + * default {{data_stream.dataset}} */ dataset?: string | string[] + /** Field references or a static value for the namespace part of the data stream name. See the criteria for + * index names for allowed characters. Must be no longer than 100 characters. + * + * Supports field references with a mustache-like syntax (denoted as {{double}} or {{{triple}}} curly braces). + * When resolving field references, the processor replaces invalid characters with _. Uses the part + * of the index name as a fallback if all field references resolve to a null, missing, or non-string value. + * + * default {{data_stream.namespace}} */ namespace?: string | string[] } export interface IngestScriptProcessor extends IngestProcessorBase { + /** ID of a stored script. + * If no `source` is specified, this parameter is required. */ id?: Id - lang?: string + /** Script language. */ + lang?: ScriptLanguage + /** Object containing parameters for the script. */ params?: Record - source?: string + /** Inline script. + * If no `id` is specified, this parameter is required. */ + source?: ScriptSource } export interface IngestSetProcessor extends IngestProcessorBase { + /** The origin field which will be copied to `field`, cannot set `value` simultaneously. + * Supported data types are `boolean`, `number`, `array`, `object`, `string`, `date`, etc. */ copy_from?: Field + /** The field to insert, upsert, or update. + * Supports template snippets. */ field: Field + /** If `true` and `value` is a template snippet that evaluates to `null` or the empty string, the processor quietly exits without modifying the document. */ ignore_empty_value?: boolean + /** The media type for encoding `value`. 
+ * Applies only when value is a template snippet. + * Must be one of `application/json`, `text/plain`, or `application/x-www-form-urlencoded`. */ media_type?: string + /** If `true` processor will update fields with pre-existing non-null-valued field. + * When set to `false`, such fields will not be touched. */ override?: boolean + /** The value to be set for the field. + * Supports template snippets. + * May specify only one of `value` or `copy_from`. */ value?: any } export interface IngestSetSecurityUserProcessor extends IngestProcessorBase { + /** The field to store the user information into. */ field: Field + /** Controls what user related properties are added to the field. */ properties?: string[] } @@ -15829,16 +23495,27 @@ export interface IngestSimulateDocumentResult { } export interface IngestSortProcessor extends IngestProcessorBase { + /** The field to be sorted. */ field: Field + /** The sort order to use. + * Accepts `"asc"` or `"desc"`. */ order?: SortOrder + /** The field to assign the sorted value to. + * By default, the field is updated in-place. */ target_field?: Field } export interface IngestSplitProcessor extends IngestProcessorBase { + /** The field to split. */ field: Field + /** If `true` and `field` does not exist, the processor quietly exits without modifying the document. */ ignore_missing?: boolean + /** Preserves empty trailing fields, if any. */ preserve_trailing?: boolean + /** A regex which matches the separator, for example, `,` or `\s+`. */ separator: string + /** The field to assign the split value to. + * By default, the field is updated in-place. */ target_field?: Field } @@ -15846,37 +23523,62 @@ export interface IngestTerminateProcessor extends IngestProcessorBase { } export interface IngestTrimProcessor extends IngestProcessorBase { + /** The string-valued field to trim whitespace from. */ field: Field + /** If `true` and `field` does not exist, the processor quietly exits without modifying the document. */ ignore_missing?: boolean + /** The field to assign the trimmed value to. + * By default, the field is updated in-place. */ target_field?: Field } export interface IngestUppercaseProcessor extends IngestProcessorBase { + /** The field to make uppercase. */ field: Field + /** If `true` and `field` does not exist or is `null`, the processor quietly exits without modifying the document. */ ignore_missing?: boolean + /** The field to assign the converted value to. + * By default, the field is updated in-place. */ target_field?: Field } export interface IngestUriPartsProcessor extends IngestProcessorBase { + /** Field containing the URI string. */ field: Field + /** If `true` and `field` does not exist, the processor quietly exits without modifying the document. */ ignore_missing?: boolean + /** If `true`, the processor copies the unparsed URI to `.original`. */ keep_original?: boolean + /** If `true`, the processor removes the `field` after parsing the URI string. + * If parsing fails, the processor does not remove the `field`. */ remove_if_successful?: boolean + /** Output field for the URI object. */ target_field?: Field } export interface IngestUrlDecodeProcessor extends IngestProcessorBase { + /** The field to decode. */ field: Field + /** If `true` and `field` does not exist or is `null`, the processor quietly exits without modifying the document. */ ignore_missing?: boolean + /** The field to assign the converted value to. + * By default, the field is updated in-place. 
*/ target_field?: Field } export interface IngestUserAgentProcessor extends IngestProcessorBase { + /** The field containing the user agent string. */ field: Field + /** If `true` and `field` does not exist, the processor quietly exits without modifying the document. */ ignore_missing?: boolean + /** The name of the file in the `config/ingest-user-agent` directory containing the regular expressions for parsing the user agent string. Both the directory and the file have to be created before starting Elasticsearch. If not specified, ingest-user-agent will use the `regexes.yaml` from uap-core it ships with. */ regex_file?: string + /** The field that will be filled with the user agent details. */ target_field?: Field + /** Controls what properties are added to `target_field`. */ properties?: IngestUserAgentProperty[] + /** Extracts device type from the user agent string on a best-effort basis. + * @beta */ extract_device_type?: boolean } @@ -15886,9 +23588,10 @@ export interface IngestWeb { } export interface IngestDeleteGeoipDatabaseRequest extends RequestBase { -/** A comma-separated list of geoip database configurations to delete */ + /** A comma-separated list of geoip database configurations to delete */ id: Ids - /** The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ + /** The period to wait for a connection to the master node. + * If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration /** The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration @@ -15901,11 +23604,15 @@ export interface IngestDeleteGeoipDatabaseRequest extends RequestBase { export type IngestDeleteGeoipDatabaseResponse = AcknowledgedResponseBase export interface IngestDeleteIpLocationDatabaseRequest extends RequestBase { -/** A comma-separated list of IP location database configurations. */ + /** A comma-separated list of IP location database configurations. */ id: Ids - /** The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. A value of `-1` indicates that the request should never time out. */ + /** The period to wait for a connection to the master node. + * If no response is received before the timeout expires, the request fails and returns an error. + * A value of `-1` indicates that the request should never time out. */ master_timeout?: Duration - /** The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. A value of `-1` indicates that the request should never time out. */ + /** The period to wait for a response. + * If no response is received before the timeout expires, the request fails and returns an error. + * A value of `-1` indicates that the request should never time out. */ timeout?: Duration /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { id?: never, master_timeout?: never, timeout?: never } @@ -15916,11 +23623,14 @@ export interface IngestDeleteIpLocationDatabaseRequest extends RequestBase { export type IngestDeleteIpLocationDatabaseResponse = AcknowledgedResponseBase export interface IngestDeletePipelineRequest extends RequestBase { -/** Pipeline ID or wildcard expression of pipeline IDs used to limit the request. 
To delete all ingest pipelines in a cluster, use a value of `*`. */ + /** Pipeline ID or wildcard expression of pipeline IDs used to limit the request. + * To delete all ingest pipelines in a cluster, use a value of `*`. */ id: Id - /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ + /** Period to wait for a connection to the master node. + * If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration - /** Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ + /** Period to wait for a response. + * If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { id?: never, master_timeout?: never, timeout?: never } @@ -15931,20 +23641,29 @@ export interface IngestDeletePipelineRequest extends RequestBase { export type IngestDeletePipelineResponse = AcknowledgedResponseBase export interface IngestGeoIpStatsGeoIpDownloadStatistics { + /** Total number of successful database downloads. */ successful_downloads: integer + /** Total number of failed database downloads. */ failed_downloads: integer + /** Total milliseconds spent downloading databases. */ total_download_time: DurationValue + /** Current number of databases available for use. */ databases_count: integer + /** Total number of database updates skipped. */ skipped_updates: integer + /** Total number of databases not updated after 30 days */ expired_databases: integer } export interface IngestGeoIpStatsGeoIpNodeDatabaseName { + /** Name of the database. */ name: Name } export interface IngestGeoIpStatsGeoIpNodeDatabases { + /** Downloaded databases for the node. */ databases: IngestGeoIpStatsGeoIpNodeDatabaseName[] + /** Downloaded database files, including related license files. Elasticsearch stores these files in the node’s temporary directory: $ES_TMPDIR/geoip-databases/. */ files_in_temp: string[] } @@ -15956,7 +23675,9 @@ export interface IngestGeoIpStatsRequest extends RequestBase { } export interface IngestGeoIpStatsResponse { + /** Download statistics for all GeoIP2 databases. */ stats: IngestGeoIpStatsGeoIpDownloadStatistics + /** Downloaded GeoIP2 databases for each node. */ nodes: Record } @@ -15968,7 +23689,9 @@ export interface IngestGetGeoipDatabaseDatabaseConfigurationMetadata { } export interface IngestGetGeoipDatabaseRequest extends RequestBase { -/** A comma-separated list of database configuration IDs to retrieve. Wildcard (`*`) expressions are supported. To get all database configurations, omit this parameter or use `*`. */ + /** A comma-separated list of database configuration IDs to retrieve. + * Wildcard (`*`) expressions are supported. + * To get all database configurations, omit this parameter or use `*`. */ id?: Ids /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { id?: never } @@ -15989,9 +23712,13 @@ export interface IngestGetIpLocationDatabaseDatabaseConfigurationMetadata { } export interface IngestGetIpLocationDatabaseRequest extends RequestBase { -/** Comma-separated list of database configuration IDs to retrieve. Wildcard (`*`) expressions are supported. To get all database configurations, omit this parameter or use `*`. 
*/ + /** Comma-separated list of database configuration IDs to retrieve. + * Wildcard (`*`) expressions are supported. + * To get all database configurations, omit this parameter or use `*`. */ id?: Ids - /** The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. A value of `-1` indicates that the request should never time out. */ + /** The period to wait for a connection to the master node. + * If no response is received before the timeout expires, the request fails and returns an error. + * A value of `-1` indicates that the request should never time out. */ master_timeout?: Duration /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { id?: never, master_timeout?: never } @@ -16004,9 +23731,12 @@ export interface IngestGetIpLocationDatabaseResponse { } export interface IngestGetPipelineRequest extends RequestBase { -/** Comma-separated list of pipeline IDs to retrieve. Wildcard (`*`) expressions are supported. To get all ingest pipelines, omit this parameter or use `*`. */ + /** Comma-separated list of pipeline IDs to retrieve. + * Wildcard (`*`) expressions are supported. + * To get all ingest pipelines, omit this parameter or use `*`. */ id?: Id - /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ + /** Period to wait for a connection to the master node. + * If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration /** Return pipelines without their definitions (default: false) */ summary?: boolean @@ -16030,15 +23760,17 @@ export interface IngestProcessorGrokResponse { } export interface IngestPutGeoipDatabaseRequest extends RequestBase { -/** ID of the database configuration to create or update. */ + /** ID of the database configuration to create or update. */ id: Id - /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ + /** Period to wait for a connection to the master node. + * If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration /** Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration /** The provider-assigned name of the IP geolocation database to download. */ name: Name - /** The configuration necessary to identify which IP geolocation provider to use to download the database, as well as any provider-specific configuration necessary for such downloading. At present, the only supported provider is maxmind, and the maxmind provider requires that an account_id (string) is configured. */ + /** The configuration necessary to identify which IP geolocation provider to use to download the database, as well as any provider-specific configuration necessary for such downloading. + * At present, the only supported provider is maxmind, and the maxmind provider requires that an account_id (string) is configured. */ maxmind: IngestMaxmind /** All values in `body` will be added to the request body. 
*/ body?: string | { [key: string]: any } & { id?: never, master_timeout?: never, timeout?: never, name?: never, maxmind?: never } @@ -16049,11 +23781,15 @@ export interface IngestPutGeoipDatabaseRequest extends RequestBase { export type IngestPutGeoipDatabaseResponse = AcknowledgedResponseBase export interface IngestPutIpLocationDatabaseRequest extends RequestBase { -/** The database configuration identifier. */ + /** The database configuration identifier. */ id: Id - /** The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. A value of `-1` indicates that the request should never time out. */ + /** The period to wait for a connection to the master node. + * If no response is received before the timeout expires, the request fails and returns an error. + * A value of `-1` indicates that the request should never time out. */ master_timeout?: Duration - /** The period to wait for a response from all relevant nodes in the cluster after updating the cluster metadata. If no response is received before the timeout expires, the cluster metadata update still applies but the response indicates that it was not completely acknowledged. A value of `-1` indicates that the request should never time out. */ + /** The period to wait for a response from all relevant nodes in the cluster after updating the cluster metadata. + * If no response is received before the timeout expires, the cluster metadata update still applies but the response indicates that it was not completely acknowledged. + * A value of `-1` indicates that the request should never time out. */ timeout?: Duration configuration?: IngestDatabaseConfiguration /** All values in `body` will be added to the request body. */ @@ -16065,7 +23801,7 @@ export interface IngestPutIpLocationDatabaseRequest extends RequestBase { export type IngestPutIpLocationDatabaseResponse = AcknowledgedResponseBase export interface IngestPutPipelineRequest extends RequestBase { -/** ID of the ingest pipeline to create or update. */ + /** ID of the ingest pipeline to create or update. */ id: Id /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration @@ -16083,7 +23819,8 @@ export interface IngestPutPipelineRequest extends RequestBase { processors?: IngestProcessorContainer[] /** Version number used by external systems to track ingest pipelines. This parameter is intended for external systems only. Elasticsearch does not use or validate pipeline version numbers. */ version?: VersionNumber - /** Marks this ingest pipeline as deprecated. When a deprecated ingest pipeline is referenced as the default or final pipeline when creating or updating a non-deprecated index template, Elasticsearch will emit a deprecation warning. */ + /** Marks this ingest pipeline as deprecated. + * When a deprecated ingest pipeline is referenced as the default or final pipeline when creating or updating a non-deprecated index template, Elasticsearch will emit a deprecation warning. */ deprecated?: boolean /** All values in `body` will be added to the request body. 
*/ body?: string | { [key: string]: any } & { id?: never, master_timeout?: never, timeout?: never, if_version?: never, _meta?: never, description?: never, on_failure?: never, processors?: never, version?: never, deprecated?: never } @@ -16094,13 +23831,16 @@ export interface IngestPutPipelineRequest extends RequestBase { export type IngestPutPipelineResponse = AcknowledgedResponseBase export interface IngestSimulateRequest extends RequestBase { -/** The pipeline to test. If you don't specify a `pipeline` in the request body, this parameter is required. */ + /** The pipeline to test. + * If you don't specify a `pipeline` in the request body, this parameter is required. */ id?: Id /** If `true`, the response includes output data for each processor in the executed pipeline. */ verbose?: boolean /** Sample documents to test in the pipeline. */ docs: IngestDocument[] - /** The pipeline to test. If you don't specify the `pipeline` request path parameter, this parameter is required. If you specify both this and the request path parameter, the API only uses the request path parameter. */ + /** The pipeline to test. + * If you don't specify the `pipeline` request path parameter, this parameter is required. + * If you specify both this and the request path parameter, the API only uses the request path parameter. */ pipeline?: IngestPipeline /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { id?: never, verbose?: never, docs?: never, pipeline?: never } @@ -16130,7 +23870,7 @@ export type LicenseLicenseStatus = 'active' | 'valid' | 'invalid' | 'expired' export type LicenseLicenseType = 'missing' | 'trial' | 'basic' | 'standard' | 'dev' | 'silver' | 'gold' | 'platinum' | 'enterprise' export interface LicenseDeleteRequest extends RequestBase { -/** The period to wait for a connection to the master node. */ + /** The period to wait for a connection to the master node. */ master_timeout?: Duration /** The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration @@ -16158,7 +23898,8 @@ export interface LicenseGetLicenseInformation { } export interface LicenseGetRequest extends RequestBase { -/** If `true`, this parameter returns enterprise for Enterprise license types. If `false`, this parameter returns platinum for both platinum and enterprise license types. This behavior is maintained for backwards compatibility. This parameter is deprecated and will always be set to true in 8.x. */ + /** If `true`, this parameter returns enterprise for Enterprise license types. If `false`, this parameter returns platinum for both platinum and enterprise license types. This behavior is maintained for backwards compatibility. + * This parameter is deprecated and will always be set to true in 8.x. */ accept_enterprise?: boolean /** Specifies whether to retrieve local information. The default value is `false`, which means the information is retrieved from the master node. */ local?: boolean @@ -16200,7 +23941,7 @@ export interface LicensePostAcknowledgement { } export interface LicensePostRequest extends RequestBase { -/** Specifies whether you acknowledge the license changes. */ + /** Specifies whether you acknowledge the license changes. */ acknowledge?: boolean /** The period to wait for a connection to the master node. 
*/ master_timeout?: Duration @@ -16222,7 +23963,7 @@ export interface LicensePostResponse { } export interface LicensePostStartBasicRequest extends RequestBase { -/** whether the user has acknowledged acknowledge messages (default: false) */ + /** whether the user has acknowledged acknowledge messages (default: false) */ acknowledge?: boolean /** Period to wait for a connection to the master node. */ master_timeout?: Duration @@ -16243,7 +23984,7 @@ export interface LicensePostStartBasicResponse { } export interface LicensePostStartTrialRequest extends RequestBase { -/** whether the user has acknowledged acknowledge messages (default: false) */ + /** whether the user has acknowledged acknowledge messages (default: false) */ acknowledge?: boolean type_query_string?: string /** Period to wait for a connection to the master node. */ @@ -16262,11 +24003,21 @@ export interface LicensePostStartTrialResponse { } export interface LogstashPipeline { + /** A description of the pipeline. + * This description is not used by Elasticsearch or Logstash. */ description: string + /** The date the pipeline was last updated. + * It must be in the `yyyy-MM-dd'T'HH:mm:ss.SSSZZ` strict_date_time format. */ last_modified: DateTime + /** The configuration for the pipeline. */ pipeline: string + /** Optional metadata about the pipeline, which can have any contents. + * This metadata is not generated or used by Elasticsearch or Logstash. */ pipeline_metadata: LogstashPipelineMetadata + /** Settings for the pipeline. + * It supports only flat keys in dot notation. */ pipeline_settings: LogstashPipelineSettings + /** The user who last updated the pipeline. */ username: string } @@ -16276,17 +24027,22 @@ export interface LogstashPipelineMetadata { } export interface LogstashPipelineSettings { + /** The number of workers that will, in parallel, execute the filter and output stages of the pipeline. */ 'pipeline.workers': integer + /** The maximum number of events an individual worker thread will collect from inputs before attempting to execute its filters and outputs. */ 'pipeline.batch.size': integer + /** When creating pipeline event batches, how long in milliseconds to wait for each event before dispatching an undersized batch to pipeline workers. */ 'pipeline.batch.delay': integer + /** The internal queuing model to use for event buffering. */ 'queue.type': string - 'queue.max_bytes.number': integer - 'queue.max_bytes.units': string + /** The total capacity of the queue (`queue.type: persisted`) in number of bytes. */ + 'queue.max_bytes': string + /** The maximum number of written events before forcing a checkpoint when persistent queues are enabled (`queue.type: persisted`). */ 'queue.checkpoint.writes': integer } export interface LogstashDeletePipelineRequest extends RequestBase { -/** An identifier for the pipeline. */ + /** An identifier for the pipeline. */ id: Id /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { id?: never } @@ -16297,7 +24053,7 @@ export interface LogstashDeletePipelineRequest extends RequestBase { export type LogstashDeletePipelineResponse = boolean export interface LogstashGetPipelineRequest extends RequestBase { -/** A comma-separated list of pipeline identifiers. */ + /** A comma-separated list of pipeline identifiers. */ id?: Ids /** All values in `body` will be added to the request body. 
*/ body?: string | { [key: string]: any } & { id?: never } @@ -16308,7 +24064,7 @@ export interface LogstashGetPipelineRequest extends RequestBase { export type LogstashGetPipelineResponse = Record export interface LogstashPutPipelineRequest extends RequestBase { -/** An identifier for the pipeline. */ + /** An identifier for the pipeline. */ id: Id pipeline?: LogstashPipeline /** All values in `body` will be added to the request body. */ @@ -16320,9 +24076,13 @@ export interface LogstashPutPipelineRequest extends RequestBase { export type LogstashPutPipelineResponse = boolean export interface MigrationDeprecationsDeprecation { + /** Optional details about the deprecation warning. */ details?: string + /** The level property describes the significance of the issue. */ level: MigrationDeprecationsDeprecationLevel + /** Descriptive information about the deprecation warning. */ message: string + /** A link to the breaking change documentation, where you can find more information about this change. */ url: string resolve_during_rolling_upgrade: boolean _meta?: Record @@ -16331,7 +24091,7 @@ export interface MigrationDeprecationsDeprecation { export type MigrationDeprecationsDeprecationLevel = 'none' | 'info' | 'warning' | 'critical' export interface MigrationDeprecationsRequest extends RequestBase { -/** Comma-separate list of data streams or indices to check. Wildcard (*) expressions are supported. */ + /** Comma-separate list of data streams or indices to check. Wildcard (*) expressions are supported. */ index?: IndexName /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { index?: never } @@ -16340,12 +24100,21 @@ export interface MigrationDeprecationsRequest extends RequestBase { } export interface MigrationDeprecationsResponse { + /** Cluster-level deprecation warnings. */ cluster_settings: MigrationDeprecationsDeprecation[] + /** Index warnings are sectioned off per index and can be filtered using an index-pattern in the query. + * This section includes warnings for the backing indices of data streams specified in the request path. */ index_settings: Record data_streams: Record + /** Node-level deprecation warnings. + * Since only a subset of your nodes might incorporate these settings, it is important to read the details section for more information about which nodes are affected. */ node_settings: MigrationDeprecationsDeprecation[] + /** Machine learning-related deprecation warnings. */ ml_settings: MigrationDeprecationsDeprecation[] + /** Template warnings are sectioned off per template and include deprecations for both component templates and + * index templates. */ templates: Record + /** ILM policy warnings are sectioned off per policy. */ ilm_policies: Record } @@ -16393,72 +24162,151 @@ export interface MigrationPostFeatureUpgradeResponse { } export interface MlAdaptiveAllocationsSettings { + /** If true, adaptive_allocations is enabled */ enabled: boolean + /** Specifies the minimum number of allocations to scale to. + * If set, it must be greater than or equal to 0. + * If not defined, the deployment scales to 0. */ min_number_of_allocations?: integer + /** Specifies the maximum number of allocations to scale to. + * If set, it must be greater than or equal to min_number_of_allocations. */ max_number_of_allocations?: integer } export interface MlAnalysisConfig { + /** The size of the interval that the analysis is aggregated into, typically between `5m` and `1h`. 
This value should be either a whole number of days or equate to a + * whole number of buckets in one day. If the anomaly detection job uses a datafeed with aggregations, this value must also be divisible by the interval of the date histogram aggregation. */ bucket_span?: Duration + /** If `categorization_field_name` is specified, you can also define the analyzer that is used to interpret the categorization field. This property cannot be used at the same time as `categorization_filters`. The categorization analyzer specifies how the `categorization_field` is interpreted by the categorization process. The `categorization_analyzer` field can be specified either as a string or as an object. If it is a string, it must refer to a built-in analyzer or one added by another plugin. */ categorization_analyzer?: MlCategorizationAnalyzer + /** If this property is specified, the values of the specified field will be categorized. The resulting categories must be used in a detector by setting `by_field_name`, `over_field_name`, or `partition_field_name` to the keyword `mlcategory`. */ categorization_field_name?: Field + /** If `categorization_field_name` is specified, you can also define optional filters. This property expects an array of regular expressions. The expressions are used to filter out matching sequences from the categorization field values. You can use this functionality to fine tune the categorization by excluding sequences from consideration when categories are defined. For example, you can exclude SQL statements that appear in your log files. This property cannot be used at the same time as `categorization_analyzer`. If you only want to define simple regular expression filters that are applied prior to tokenization, setting this property is the easiest method. If you also want to customize the tokenizer or post-tokenization filtering, use the `categorization_analyzer` property instead and include the filters as pattern_replace character filters. The effect is exactly the same. */ categorization_filters?: string[] + /** Detector configuration objects specify which data fields a job analyzes. They also specify which analytical functions are used. You can specify multiple detectors for a job. If the detectors array does not contain at least one detector, no analysis can occur and an error is returned. */ detectors: MlDetector[] + /** A comma separated list of influencer field names. Typically these can be the by, over, or partition fields that are used in the detector configuration. You might also want to use a field name that is not specifically named in a detector, but is available as part of the input data. When you use multiple detectors, the use of influencers is recommended as it aggregates results for each influencer entity. */ influencers?: Field[] + /** The size of the window in which to expect data that is out of time order. If you specify a non-zero value, it must be greater than or equal to one second. NOTE: Latency is applicable only when you send data by using the post data API. */ latency?: Duration + /** Advanced configuration option. Affects the pruning of models that have not been updated for the given time duration. The value must be set to a multiple of the `bucket_span`. If set too low, important information may be removed from the model. For jobs created in 8.1 and later, the default value is the greater of `30d` or 20 times `bucket_span`. */ model_prune_window?: Duration + /** This functionality is reserved for internal use. 
It is not supported for use in customer environments and is not subject to the support SLA of official GA features. If set to `true`, the analysis will automatically find correlations between metrics for a given by field value and report anomalies when those correlations cease to hold. For example, suppose CPU and memory usage on host A is usually highly correlated with the same metrics on host B. Perhaps this correlation occurs because they are running a load-balanced application. If you enable this property, anomalies will be reported when, for example, CPU usage on host A is high and the value of CPU usage on host B is low. That is to say, you’ll see an anomaly when the CPU of host A is unusual given the CPU of host B. To use the `multivariate_by_fields` property, you must also specify `by_field_name` in your detector. */ multivariate_by_fields?: boolean + /** Settings related to how categorization interacts with partition fields. */ per_partition_categorization?: MlPerPartitionCategorization + /** If this property is specified, the data that is fed to the job is expected to be pre-summarized. This property value is the name of the field that contains the count of raw data points that have been summarized. The same `summary_count_field_name` applies to all detectors in the job. NOTE: The `summary_count_field_name` property cannot be used with the `metric` function. */ summary_count_field_name?: Field } export interface MlAnalysisConfigRead { + /** The size of the interval that the analysis is aggregated into, typically between `5m` and `1h`. */ bucket_span: Duration + /** If `categorization_field_name` is specified, you can also define the analyzer that is used to interpret the categorization field. + * This property cannot be used at the same time as `categorization_filters`. + * The categorization analyzer specifies how the `categorization_field` is interpreted by the categorization process. */ categorization_analyzer?: MlCategorizationAnalyzer + /** If this property is specified, the values of the specified field will be categorized. + * The resulting categories must be used in a detector by setting `by_field_name`, `over_field_name`, or `partition_field_name` to the keyword `mlcategory`. */ categorization_field_name?: Field + /** If `categorization_field_name` is specified, you can also define optional filters. + * This property expects an array of regular expressions. + * The expressions are used to filter out matching sequences from the categorization field values. */ categorization_filters?: string[] + /** An array of detector configuration objects. + * Detector configuration objects specify which data fields a job analyzes. + * They also specify which analytical functions are used. + * You can specify multiple detectors for a job. */ detectors: MlDetectorRead[] + /** A comma separated list of influencer field names. + * Typically these can be the by, over, or partition fields that are used in the detector configuration. + * You might also want to use a field name that is not specifically named in a detector, but is available as part of the input data. + * When you use multiple detectors, the use of influencers is recommended as it aggregates results for each influencer entity. */ influencers: Field[] + /** Advanced configuration option. + * Affects the pruning of models that have not been updated for the given time duration. + * The value must be set to a multiple of the `bucket_span`. + * If set too low, important information may be removed from the model. 
+ * Typically, set to `30d` or longer. + * If not set, model pruning only occurs if the model memory status reaches the soft limit or the hard limit. + * For jobs created in 8.1 and later, the default value is the greater of `30d` or 20 times `bucket_span`. */ model_prune_window?: Duration + /** The size of the window in which to expect data that is out of time order. + * Defaults to no latency. + * If you specify a non-zero value, it must be greater than or equal to one second. */ latency?: Duration + /** This functionality is reserved for internal use. + * It is not supported for use in customer environments and is not subject to the support SLA of official GA features. + * If set to `true`, the analysis will automatically find correlations between metrics for a given by field value and report anomalies when those correlations cease to hold. */ multivariate_by_fields?: boolean + /** Settings related to how categorization interacts with partition fields. */ per_partition_categorization?: MlPerPartitionCategorization + /** If this property is specified, the data that is fed to the job is expected to be pre-summarized. + * This property value is the name of the field that contains the count of raw data points that have been summarized. + * The same `summary_count_field_name` applies to all detectors in the job. */ summary_count_field_name?: Field } export interface MlAnalysisLimits { + /** The maximum number of examples stored per category in memory and in the results data store. If you increase this value, more examples are available, however it requires that you have more storage available. If you set this value to 0, no examples are stored. NOTE: The `categorization_examples_limit` applies only to analysis that uses categorization. */ categorization_examples_limit?: long + /** The approximate maximum amount of memory resources that are required for analytical processing. Once this limit is approached, data pruning becomes more aggressive. Upon exceeding this limit, new entities are not modeled. If the `xpack.ml.max_model_memory_limit` setting has a value greater than 0 and less than 1024mb, that value is used instead of the default. The default value is relatively small to ensure that high resource usage is a conscious decision. If you have jobs that are expected to analyze high cardinality fields, you will likely need to use a higher value. If you specify a number instead of a string, the units are assumed to be MiB. Specifying a string is recommended for clarity. If you specify a byte size unit of `b` or `kb` and the number does not equate to a discrete number of megabytes, it is rounded down to the closest MiB. The minimum valid value is 1 MiB. If you specify a value less than 1 MiB, an error occurs. If you specify a value for the `xpack.ml.max_model_memory_limit` setting, an error occurs when you try to create jobs that have `model_memory_limit` values greater than that setting value. */ model_memory_limit?: ByteSize } export interface MlAnalysisMemoryLimit { + /** Limits can be applied for the resources required to hold the mathematical models in memory. These limits are approximate and can be set per job. They do not control the memory used by other processes, for example the Elasticsearch Java processes. */ model_memory_limit: string } export interface MlAnomaly { + /** The actual value for the bucket. */ actual?: double[] + /** Information about the factors impacting the initial anomaly score. 
*/ anomaly_score_explanation?: MlAnomalyExplanation + /** The length of the bucket in seconds. This value matches the `bucket_span` that is specified in the job. */ bucket_span: DurationValue + /** The field used to split the data. In particular, this property is used for analyzing the splits with respect to their own history. It is used for finding unusual values in the context of the split. */ by_field_name?: string + /** The value of `by_field_name`. */ by_field_value?: string + /** For population analysis, an over field must be specified in the detector. This property contains an array of anomaly records that are the causes for the anomaly that has been identified for the over field. This sub-resource contains the most anomalous records for the `over_field_name`. For scalability reasons, a maximum of the 10 most significant causes of the anomaly are returned. As part of the core analytical modeling, these low-level anomaly records are aggregated for their parent over field record. The `causes` resource contains similar elements to the record resource, namely `actual`, `typical`, `geo_results.actual_point`, `geo_results.typical_point`, `*_field_name` and `*_field_value`. Probability and scores are not applicable to causes. */ causes?: MlAnomalyCause[] + /** A unique identifier for the detector. */ detector_index: integer + /** Certain functions require a field to operate on, for example, `sum()`. For those functions, this value is the name of the field to be analyzed. */ field_name?: string + /** The function in which the anomaly occurs, as specified in the detector configuration. For example, `max`. */ function?: string + /** The description of the function in which the anomaly occurs, as specified in the detector configuration. */ function_description?: string + /** If the detector function is `lat_long`, this object contains comma delimited strings for the latitude and longitude of the actual and typical values. */ geo_results?: MlGeoResults + /** If influencers were specified in the detector configuration, this array contains influencers that contributed to or were to blame for an anomaly. */ influencers?: MlInfluence[] + /** A normalized score between 0-100, which is based on the probability of the anomalousness of this record. This is the initial value that was calculated at the time the bucket was processed. */ initial_record_score: double + /** If true, this is an interim result. In other words, the results are calculated based on partial input data. */ is_interim: boolean + /** Identifier for the anomaly detection job. */ job_id: string + /** The field used to split the data. In particular, this property is used for analyzing the splits with respect to the history of all splits. It is used for finding unusual values in the population of all splits. */ over_field_name?: string + /** The value of `over_field_name`. */ over_field_value?: string + /** The field used to segment the analysis. When you use this property, you have completely independent baselines for each value of this field. */ partition_field_name?: string + /** The value of `partition_field_name`. */ partition_field_value?: string + /** The probability of the individual anomaly occurring, in the range 0 to 1. For example, `0.0000772031`. This value can be held to a high precision of over 300 decimal places, so the `record_score` is provided as a human-readable and friendly interpretation of this. 
*/ probability: double + /** A normalized score between 0-100, which is based on the probability of the anomalousness of this record. Unlike `initial_record_score`, this value will be updated by a re-normalization process as new data is analyzed. */ record_score: double + /** Internal. This is always set to `record`. */ result_type: string + /** The start time of the bucket for which these results were calculated. */ timestamp: EpochTime + /** The typical value for the bucket, according to analytical modeling. */ typical?: double[] } @@ -16481,85 +24329,148 @@ export interface MlAnomalyCause { } export interface MlAnomalyExplanation { + /** Impact from the duration and magnitude of the detected anomaly relative to the historical average. */ anomaly_characteristics_impact?: integer + /** Length of the detected anomaly in the number of buckets. */ anomaly_length?: integer + /** Type of the detected anomaly: `spike` or `dip`. */ anomaly_type?: string + /** Indicates reduction of anomaly score for the bucket with large confidence intervals. If a bucket has large confidence intervals, the score is reduced. */ high_variance_penalty?: boolean + /** If the bucket contains fewer samples than expected, the score is reduced. */ incomplete_bucket_penalty?: boolean + /** Lower bound of the 95% confidence interval. */ lower_confidence_bound?: double + /** Impact of the deviation between actual and typical values in the past 12 buckets. */ multi_bucket_impact?: integer + /** Impact of the deviation between actual and typical values in the current bucket. */ single_bucket_impact?: integer + /** Typical (expected) value for this bucket. */ typical_value?: double + /** Upper bound of the 95% confidence interval. */ upper_confidence_bound?: double } export interface MlApiKeyAuthorization { + /** The identifier for the API key. */ id: string + /** The name of the API key. */ name: string } export type MlAppliesTo = 'actual' | 'typical' | 'diff_from_typical' | 'time' export interface MlBucketInfluencer { + /** A normalized score between 0-100, which is calculated for each bucket influencer. This score might be updated as + * newer data is analyzed. */ anomaly_score: double + /** The length of the bucket in seconds. This value matches the bucket span that is specified in the job. */ bucket_span: DurationValue + /** The field name of the influencer. */ influencer_field_name: Field + /** The score between 0-100 for each bucket influencer. This score is the initial value that was calculated at the + * time the bucket was processed. */ initial_anomaly_score: double + /** If true, this is an interim result. In other words, the results are calculated based on partial input data. */ is_interim: boolean + /** Identifier for the anomaly detection job. */ job_id: Id + /** The probability that the bucket has this behavior, in the range 0 to 1. This value can be held to a high precision + * of over 300 decimal places, so the `anomaly_score` is provided as a human-readable and friendly interpretation of + * this. */ probability: double + /** Internal. */ raw_anomaly_score: double + /** Internal. This value is always set to `bucket_influencer`. */ result_type: string + /** The start time of the bucket for which these results were calculated. */ timestamp: EpochTime + /** The start time of the bucket for which these results were calculated. */ timestamp_string?: DateTime } export interface MlBucketSummary { + /** The maximum anomaly score, between 0-100, for any of the bucket influencers. 
This is an overall, rate-limited + * score for the job. All the anomaly records in the bucket contribute to this score. This value might be updated as + * new data is analyzed. */ anomaly_score: double bucket_influencers: MlBucketInfluencer[] + /** The length of the bucket in seconds. This value matches the bucket span that is specified in the job. */ bucket_span: DurationValue + /** The number of input data records processed in this bucket. */ event_count: long + /** The maximum anomaly score for any of the bucket influencers. This is the initial value that was calculated at the + * time the bucket was processed. */ initial_anomaly_score: double + /** If true, this is an interim result. In other words, the results are calculated based on partial input data. */ is_interim: boolean + /** Identifier for the anomaly detection job. */ job_id: Id + /** The amount of time, in milliseconds, that it took to analyze the bucket contents and calculate results. */ processing_time_ms: DurationValue + /** Internal. This value is always set to bucket. */ result_type: string + /** The start time of the bucket. This timestamp uniquely identifies the bucket. Events that occur exactly at the + * timestamp of the bucket are included in the results for the bucket. */ timestamp: EpochTime + /** The start time of the bucket. This timestamp uniquely identifies the bucket. Events that occur exactly at the + * timestamp of the bucket are included in the results for the bucket. */ timestamp_string?: DateTime } export interface MlCalendarEvent { + /** A string that uniquely identifies a calendar. */ calendar_id?: Id event_id?: Id + /** A description of the scheduled event. */ description: string + /** The timestamp for the end of the scheduled event in milliseconds since the epoch or ISO 8601 format. */ end_time: DateTime + /** The timestamp for the beginning of the scheduled event in milliseconds since the epoch or ISO 8601 format. */ start_time: DateTime + /** When true the model will not create results for this calendar period. */ skip_result?: boolean + /** When true the model will not be updated for this calendar period. */ skip_model_update?: boolean + /** Shift time by this many seconds. For example adjust time for daylight savings changes */ force_time_shift?: integer } export type MlCategorizationAnalyzer = string | MlCategorizationAnalyzerDefinition export interface MlCategorizationAnalyzerDefinition { + /** One or more character filters. In addition to the built-in character filters, other plugins can provide more character filters. If this property is not specified, no character filters are applied prior to categorization. If you are customizing some other aspect of the analyzer and you need to achieve the equivalent of `categorization_filters` (which are not permitted when some other aspect of the analyzer is customized), add them here as pattern replace character filters. */ char_filter?: AnalysisCharFilter[] + /** One or more token filters. In addition to the built-in token filters, other plugins can provide more token filters. If this property is not specified, no token filters are applied prior to categorization. */ filter?: AnalysisTokenFilter[] + /** The name or definition of the tokenizer to use after character filters are applied. This property is compulsory if `categorization_analyzer` is specified as an object. 
Machine learning provides a tokenizer called `ml_standard` that tokenizes in a way that has been determined to produce good categorization results on a variety of log file formats for logs in English. If you want to use that tokenizer but change the character or token filters, specify "tokenizer": "ml_standard" in your `categorization_analyzer`. Additionally, the `ml_classic` tokenizer is available, which tokenizes in the same way as the non-customizable tokenizer in old versions of the product (before 6.2). `ml_classic` was the default categorization tokenizer in versions 6.2 to 7.13, so if you need categorization identical to the default for jobs created in these versions, specify "tokenizer": "ml_classic" in your `categorization_analyzer`. */ tokenizer?: AnalysisTokenizer } export type MlCategorizationStatus = 'ok' | 'warn' export interface MlCategory { + /** A unique identifier for the category. category_id is unique at the job level, even when per-partition categorization is enabled. */ category_id: ulong + /** A list of examples of actual values that matched the category. */ examples: string[] + /** [experimental] A Grok pattern that could be used in Logstash or an ingest pipeline to extract fields from messages that match the category. This field is experimental and may be changed or removed in a future release. The Grok patterns that are found are not optimal, but are often a good starting point for manual tweaking. */ grok_pattern?: GrokPattern + /** Identifier for the anomaly detection job. */ job_id: Id + /** The maximum length of the fields that matched the category. The value is increased by 10% to enable matching for similar fields that have not been analyzed. */ max_matching_length: ulong + /** If per-partition categorization is enabled, this property identifies the field used to segment the categorization. It is not present when per-partition categorization is disabled. */ partition_field_name?: string + /** If per-partition categorization is enabled, this property identifies the value of the partition_field_name for the category. It is not present when per-partition categorization is disabled. */ partition_field_value?: string + /** A regular expression that is used to search for values that match the category. */ regex: string + /** A space separated list of the common tokens that are matched in values of the category. */ terms: string + /** The number of messages that have been matched by this category. This is only guaranteed to have the latest accurate count after a job _flush or _close */ num_matches?: long + /** A list of category_id entries that this current category encompasses. Any new message that is processed by the categorizer will match against this category and not any of the categories in this list. This is only guaranteed to have the latest accurate list of categories after a job _flush or _close */ preferred_to_categories?: Id[] p?: string result_type: string @@ -16567,25 +24478,40 @@ export interface MlCategory { } export interface MlChunkingConfig { + /** If the mode is `auto`, the chunk size is dynamically calculated; + * this is the recommended value when the datafeed does not use aggregations. + * If the mode is `manual`, chunking is applied according to the specified `time_span`; + * use this mode when the datafeed uses aggregations. If the mode is `off`, no chunking is applied. */ mode: MlChunkingMode + /** The time span that each search will be querying. This setting is applicable only when the `mode` is set to `manual`. 
*/ time_span?: Duration } export type MlChunkingMode = 'auto' | 'manual' | 'off' export interface MlClassificationInferenceOptions { + /** Specifies the number of top class predictions to return. Defaults to 0. */ num_top_classes?: integer + /** Specifies the maximum number of feature importance values per document. */ num_top_feature_importance_values?: integer + /** Specifies the type of the predicted field to write. Acceptable values are: string, number, boolean. When boolean is provided 1.0 is transformed to true and 0.0 to false. */ prediction_field_type?: string + /** The field that is added to incoming documents to contain the inference prediction. Defaults to predicted_value. */ results_field?: string + /** Specifies the field to which the top classes are written. Defaults to top_classes. */ top_classes_results_field?: string } export interface MlCommonTokenizationConfig { + /** Should the tokenizer lower case the text */ do_lower_case?: boolean + /** Maximum input sequence length for the model */ max_sequence_length?: integer + /** Tokenization spanning options. Special value of -1 indicates no spanning takes place */ span?: integer + /** Should tokenization input be automatically truncated before sending to the model for inference */ truncate?: MlTokenizationTruncate + /** Is tokenization completed with special tokens */ with_special_tokens?: boolean } @@ -16616,15 +24542,20 @@ export interface MlDataCounts { } export interface MlDataDescription { + /** Only JSON format is supported at this time. */ format?: string + /** The name of the field that contains the timestamp. */ time_field?: Field + /** The time format, which can be `epoch`, `epoch_ms`, or a custom pattern. The value `epoch` refers to UNIX or Epoch time (the number of seconds since 1 Jan 1970). The value `epoch_ms` indicates that time is measured in milliseconds since the epoch. The `epoch` and `epoch_ms` time formats accept either integer or real values. Custom patterns must conform to the Java DateTimeFormatter class. When you use date-time formatting patterns, it is recommended that you provide the full date, time and time zone. For example: `yyyy-MM-dd'T'HH:mm:ssX`. If the pattern that you specify is not sufficient to produce a complete timestamp, job creation fails. */ time_format?: string field_delimiter?: string } export interface MlDatafeed { aggregations?: Record + /** @alias aggregations */ aggs?: Record + /** The security privileges that the datafeed uses to run its queries. If Elastic Stack security features were disabled at the time of the most recent update to the datafeed, this property is omitted. */ authorization?: MlDatafeedAuthorization chunking_config?: MlChunkingConfig datafeed_id: Id @@ -16643,226 +24574,367 @@ export interface MlDatafeed { } export interface MlDatafeedAuthorization { + /** If an API key was used for the most recent update to the datafeed, its name and identifier are listed in the response. */ api_key?: MlApiKeyAuthorization + /** If a user ID was used for the most recent update to the datafeed, its roles at the time of the update are listed in the response. */ roles?: string[] + /** If a service account was used for the most recent update to the datafeed, the account name is listed in the response. */ service_account?: string } export interface MlDatafeedConfig { + /** If set, the datafeed performs aggregation searches. Support for aggregations is limited and should be used only with low cardinality data. 
*/ aggregations?: Record + /** If set, the datafeed performs aggregation searches. Support for aggregations is limited and should be used only with low cardinality data. + * @alias aggregations */ aggs?: Record + /** Datafeeds might be required to search over long time periods, for several months or years. This search is split into time chunks in order to ensure the load on Elasticsearch is managed. Chunking configuration controls how the size of these time chunks are calculated and is an advanced configuration option. */ chunking_config?: MlChunkingConfig + /** A numerical character string that uniquely identifies the datafeed. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. It must start and end with alphanumeric characters. The default value is the job identifier. */ datafeed_id?: Id + /** Specifies whether the datafeed checks for missing data and the size of the window. The datafeed can optionally search over indices that have already been read in an effort to determine whether any data has subsequently been added to the index. If missing data is found, it is a good indication that the `query_delay` option is set too low and the data is being indexed after the datafeed has passed that moment in time. This check runs only on real-time datafeeds. */ delayed_data_check_config?: MlDelayedDataCheckConfig + /** The interval at which scheduled queries are made while the datafeed runs in real time. The default value is either the bucket span for short bucket spans, or, for longer bucket spans, a sensible fraction of the bucket span. For example: `150s`. When `frequency` is shorter than the bucket span, interim results for the last (partial) bucket are written then eventually overwritten by the full bucket results. If the datafeed uses aggregations, this value must be divisible by the interval of the date histogram aggregation. */ frequency?: Duration + /** An array of index names. Wildcards are supported. If any indices are in remote clusters, the machine learning nodes must have the `remote_cluster_client` role. */ indices?: Indices + /** An array of index names. Wildcards are supported. If any indices are in remote clusters, the machine learning nodes must have the `remote_cluster_client` role. + * @alias indices */ indexes?: Indices + /** Specifies index expansion options that are used during search. */ indices_options?: IndicesOptions job_id?: Id + /** If a real-time datafeed has never seen any data (including during any initial training period) then it will automatically stop itself and close its associated job after this many real-time searches that return no documents. In other words, it will stop after `frequency` times `max_empty_searches` of real-time operation. If not set then a datafeed with no end time that sees no data will remain started until it is explicitly stopped. */ max_empty_searches?: integer + /** The Elasticsearch query domain-specific language (DSL). This value corresponds to the query object in an Elasticsearch search POST body. All the options that are supported by Elasticsearch can be used, as this object is passed verbatim to Elasticsearch. */ query?: QueryDslQueryContainer + /** The number of seconds behind real time that data is queried. For example, if data from 10:04 a.m. might not be searchable in Elasticsearch until 10:06 a.m., set this property to 120 seconds. The default value is randomly selected between `60s` and `120s`. 
This randomness improves the query performance when there are multiple jobs running on the same node. */ query_delay?: Duration + /** Specifies runtime fields for the datafeed search. */ runtime_mappings?: MappingRuntimeFields + /** Specifies scripts that evaluate custom expressions and returns script fields to the datafeed. The detector configuration objects in a job can contain functions that use these script fields. */ script_fields?: Record + /** The size parameter that is used in Elasticsearch searches when the datafeed does not use aggregations. The maximum value is the value of `index.max_result_window`, which is 10,000 by default. */ scroll_size?: integer } export interface MlDatafeedRunningState { + /** Indicates if the datafeed is "real-time"; meaning that the datafeed has no configured `end` time. */ real_time_configured: boolean + /** Indicates whether the datafeed has finished running on the available past data. + * For datafeeds without a configured `end` time, this means that the datafeed is now running on "real-time" data. */ real_time_running: boolean + /** Provides the latest time interval the datafeed has searched. */ search_interval?: MlRunningStateSearchInterval } export type MlDatafeedState = 'started' | 'stopped' | 'starting' | 'stopping' export interface MlDatafeedStats { + /** For started datafeeds only, contains messages relating to the selection of a node. */ assignment_explanation?: string + /** A numerical character string that uniquely identifies the datafeed. + * This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. + * It must start and end with alphanumeric characters. */ datafeed_id: Id + /** For started datafeeds only, this information pertains to the node upon which the datafeed is started. + * @remarks This property is not supported on Elastic Cloud Serverless. */ node?: MlDiscoveryNodeCompact + /** The status of the datafeed, which can be one of the following values: `starting`, `started`, `stopping`, `stopped`. */ state: MlDatafeedState + /** An object that provides statistical information about timing aspect of this datafeed. */ timing_stats?: MlDatafeedTimingStats + /** An object containing the running state for this datafeed. + * It is only provided if the datafeed is started. */ running_state?: MlDatafeedRunningState } export interface MlDatafeedTimingStats { + /** The number of buckets processed. */ bucket_count: long + /** The exponential average search time per hour, in milliseconds. */ exponential_average_search_time_per_hour_ms: DurationValue exponential_average_calculation_context?: MlExponentialAverageCalculationContext + /** Identifier for the anomaly detection job. */ job_id: Id + /** The number of searches run by the datafeed. */ search_count: long + /** The total time the datafeed spent searching, in milliseconds. */ total_search_time_ms: DurationValue + /** The average search time per bucket, in milliseconds. */ average_search_time_per_bucket_ms?: DurationValue } export interface MlDataframeAnalysis { + /** Advanced configuration option. Machine learning uses loss guided tree growing, which means that the decision trees grow where the regularized loss decreases most quickly. This parameter affects loss calculations by acting as a multiplier of the tree depth. Higher alpha values result in shallower trees and faster training times. By default, this value is calculated during hyperparameter optimization. It must be greater than or equal to zero. 
*/ alpha?: double + /** Defines which field of the document is to be predicted. It must match one of the fields in the index being used to train. If this field is missing from a document, then that document will not be used for training, but a prediction with the trained model will be generated for it. It is also known as continuous target variable. + * For classification analysis, the data type of the field must be numeric (`integer`, `short`, `long`, `byte`), categorical (`ip` or `keyword`), or `boolean`. There must be no more than 30 different values in this field. + * For regression analysis, the data type of the field must be numeric. */ dependent_variable: string + /** Advanced configuration option. Controls the fraction of data that is used to compute the derivatives of the loss function for tree training. A small value results in the use of a small fraction of the data. If this value is set to be less than 1, accuracy typically improves. However, too small a value may result in poor convergence for the ensemble and so require more trees. By default, this value is calculated during hyperparameter optimization. It must be greater than zero and less than or equal to 1. */ downsample_factor?: double + /** Advanced configuration option. Specifies whether the training process should finish if it is not finding any better performing models. If disabled, the training process can take significantly longer and the chance of finding a better performing model is unremarkable. */ early_stopping_enabled?: boolean + /** Advanced configuration option. The shrinkage applied to the weights. Smaller values result in larger forests which have a better generalization error. However, larger forests cause slower training. By default, this value is calculated during hyperparameter optimization. It must be a value between 0.001 and 1. */ eta?: double + /** Advanced configuration option. Specifies the rate at which `eta` increases for each new tree that is added to the forest. For example, a rate of 1.05 increases `eta` by 5% for each extra tree. By default, this value is calculated during hyperparameter optimization. It must be between 0.5 and 2. */ eta_growth_rate_per_tree?: double + /** Advanced configuration option. Defines the fraction of features that will be used when selecting a random bag for each candidate split. By default, this value is calculated during hyperparameter optimization. */ feature_bag_fraction?: double + /** Advanced configuration option. A collection of feature preprocessors that modify one or more included fields. The analysis uses the resulting one or more features instead of the original document field. However, these features are ephemeral; they are not stored in the destination index. Multiple `feature_processors` entries can refer to the same document fields. Automatic categorical feature encoding still occurs for the fields that are unprocessed by a custom processor or that have categorical values. Use this property only if you want to override the automatic feature encoding of the specified fields. */ feature_processors?: MlDataframeAnalysisFeatureProcessor[] + /** Advanced configuration option. Regularization parameter to prevent overfitting on the training data set. Multiplies a linear penalty associated with the size of individual trees in the forest. A high gamma value causes training to prefer small trees. A small gamma value results in larger individual trees and slower training. By default, this value is calculated during hyperparameter optimization. 
It must be a nonnegative value. */ gamma?: double + /** Advanced configuration option. Regularization parameter to prevent overfitting on the training data set. Multiplies an L2 regularization term which applies to leaf weights of the individual trees in the forest. A high lambda value causes training to favor small leaf weights. This behavior makes the prediction function smoother at the expense of potentially not being able to capture relevant relationships between the features and the dependent variable. A small lambda value results in large individual trees and slower training. By default, this value is calculated during hyperparameter optimization. It must be a nonnegative value. */ lambda?: double + /** Advanced configuration option. A multiplier responsible for determining the maximum number of hyperparameter optimization steps in the Bayesian optimization procedure. The maximum number of steps is determined based on the number of undefined hyperparameters times the maximum optimization rounds per hyperparameter. By default, this value is calculated during hyperparameter optimization. */ max_optimization_rounds_per_hyperparameter?: integer + /** Advanced configuration option. Defines the maximum number of decision trees in the forest. The maximum value is 2000. By default, this value is calculated during hyperparameter optimization. */ max_trees?: integer + /** Advanced configuration option. Defines the maximum number of decision trees in the forest. The maximum value is 2000. By default, this value is calculated during hyperparameter optimization. + * @alias max_trees */ maximum_number_trees?: integer + /** Advanced configuration option. Specifies the maximum number of feature importance values per document to return. By default, no feature importance calculation occurs. */ num_top_feature_importance_values?: integer + /** Defines the name of the prediction field in the results. Defaults to `_prediction`. */ prediction_field_name?: Field + /** Defines the seed for the random generator that is used to pick training data. By default, it is randomly generated. Set it to a specific value to use the same training data each time you start a job (assuming other related parameters such as `source` and `analyzed_fields` are the same). */ randomize_seed?: double + /** Advanced configuration option. Machine learning uses loss guided tree growing, which means that the decision trees grow where the regularized loss decreases most quickly. This soft limit combines with the `soft_tree_depth_tolerance` to penalize trees that exceed the specified depth; the regularized loss increases quickly beyond this depth. By default, this value is calculated during hyperparameter optimization. It must be greater than or equal to 0. */ soft_tree_depth_limit?: integer + /** Advanced configuration option. This option controls how quickly the regularized loss increases when the tree depth exceeds `soft_tree_depth_limit`. By default, this value is calculated during hyperparameter optimization. It must be greater than or equal to 0.01. */ soft_tree_depth_tolerance?: double + /** Defines what percentage of the eligible documents that will be used for training. Documents that are ignored by the analysis (for example those that contain arrays with more than one value) won’t be included in the calculation for used percentage. */ training_percent?: Percentage } export interface MlDataframeAnalysisAnalyzedFields { + /** An array of strings that defines the fields that will be excluded from the analysis. 
You do not need to add fields with unsupported data types to excludes, these fields are excluded from the analysis automatically. */ excludes?: string[] + /** An array of strings that defines the fields that will be included in the analysis. */ includes?: string[] } export interface MlDataframeAnalysisClassification extends MlDataframeAnalysis { class_assignment_objective?: string + /** Defines the number of categories for which the predicted probabilities are reported. It must be non-negative or -1. If it is -1 or greater than the total number of categories, probabilities are reported for all categories; if you have a large number of categories, there could be a significant effect on the size of your destination index. NOTE: To use the AUC ROC evaluation method, `num_top_classes` must be set to -1 or a value greater than or equal to the total number of categories. */ num_top_classes?: integer } export interface MlDataframeAnalysisContainer { + /** The configuration information necessary to perform classification. */ classification?: MlDataframeAnalysisClassification + /** The configuration information necessary to perform outlier detection. NOTE: Advanced parameters are for fine-tuning classification analysis. They are set automatically by hyperparameter optimization to give the minimum validation error. It is highly recommended to use the default values unless you fully understand the function of these parameters. */ outlier_detection?: MlDataframeAnalysisOutlierDetection + /** The configuration information necessary to perform regression. NOTE: Advanced parameters are for fine-tuning regression analysis. They are set automatically by hyperparameter optimization to give the minimum validation error. It is highly recommended to use the default values unless you fully understand the function of these parameters. */ regression?: MlDataframeAnalysisRegression } export interface MlDataframeAnalysisFeatureProcessor { + /** The configuration information necessary to perform frequency encoding. */ frequency_encoding?: MlDataframeAnalysisFeatureProcessorFrequencyEncoding + /** The configuration information necessary to perform multi encoding. It allows multiple processors to be chained together. This way the output of a processor can then be passed to another as an input. */ multi_encoding?: MlDataframeAnalysisFeatureProcessorMultiEncoding + /** The configuration information necessary to perform n-gram encoding. Features created by this encoder have the following name format: .. For example, if the feature_prefix is f, the feature name for the second unigram in a string is f.11. */ n_gram_encoding?: MlDataframeAnalysisFeatureProcessorNGramEncoding + /** The configuration information necessary to perform one hot encoding. */ one_hot_encoding?: MlDataframeAnalysisFeatureProcessorOneHotEncoding + /** The configuration information necessary to perform target mean encoding. */ target_mean_encoding?: MlDataframeAnalysisFeatureProcessorTargetMeanEncoding } export interface MlDataframeAnalysisFeatureProcessorFrequencyEncoding { + /** The resulting feature name. */ feature_name: Name field: Field + /** The resulting frequency map for the field value. If the field value is missing from the frequency_map, the resulting value is 0. */ frequency_map: Record } export interface MlDataframeAnalysisFeatureProcessorMultiEncoding { + /** The ordered array of custom processors to execute. Must be more than 1.
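
// --- Illustrative sketch (not part of the generated types in this diff): creating a
// classification data frame analytics job using the MlDataframeAnalysisClassification and
// analyzed-fields options documented above. The job id, index names, and field names are hypothetical.
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

async function createClassificationJob () {
  await client.ml.putDataFrameAnalytics({
    id: 'loan-default-classification',                 // hypothetical job id
    source: { index: 'loan-applications' },            // hypothetical source index
    dest: { index: 'loan-applications-predictions' },  // hypothetical destination index
    analysis: {
      classification: {
        dependent_variable: 'defaulted',               // the field to predict
        num_top_classes: -1,                           // report probabilities for all classes
        training_percent: 80                           // 80% of eligible documents used for training
        // advanced hyperparameters (eta, gamma, lambda, max_trees, ...) are usually left to
        // hyperparameter optimization, as the comments above recommend
      }
    },
    analyzed_fields: {
      includes: [],                                    // empty means all supported fields
      excludes: ['application_id']                     // fields to leave out of the analysis
    }
  })
}
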
*/ processors: integer[] } export interface MlDataframeAnalysisFeatureProcessorNGramEncoding { + /** The feature name prefix. Defaults to ngram__. */ feature_prefix?: string + /** The name of the text field to encode. */ field: Field + /** Specifies the length of the n-gram substring. Defaults to 50. Must be greater than 0. */ length?: integer + /** Specifies which n-grams to gather. It’s an array of integer values where the minimum value is 1, and a maximum value is 5. */ n_grams: integer[] + /** Specifies the zero-indexed start of the n-gram substring. Negative values are allowed for encoding n-grams of string suffixes. Defaults to 0. */ start?: integer custom?: boolean } export interface MlDataframeAnalysisFeatureProcessorOneHotEncoding { + /** The name of the field to encode. */ field: Field + /** The one hot map mapping the field value with the column name. */ hot_map: string } export interface MlDataframeAnalysisFeatureProcessorTargetMeanEncoding { + /** The default value if field value is not found in the target_map. */ default_value: integer + /** The resulting feature name. */ feature_name: Name + /** The name of the field to encode. */ field: Field + /** The field value to target mean transition map. */ target_map: Record } export interface MlDataframeAnalysisOutlierDetection { + /** Specifies whether the feature influence calculation is enabled. */ compute_feature_influence?: boolean + /** The minimum outlier score that a document needs to have in order to calculate its feature influence score. Value range: 0-1. */ feature_influence_threshold?: double + /** The method that outlier detection uses. Available methods are `lof`, `ldof`, `distance_kth_nn`, `distance_knn`, and `ensemble`. The default value is ensemble, which means that outlier detection uses an ensemble of different methods and normalises and combines their individual outlier scores to obtain the overall outlier score. */ method?: string + /** Defines the value for how many nearest neighbors each method of outlier detection uses to calculate its outlier score. When the value is not set, different values are used for different ensemble members. This default behavior helps improve the diversity in the ensemble; only override it if you are confident that the value you choose is appropriate for the data set. */ n_neighbors?: integer + /** The proportion of the data set that is assumed to be outlying prior to outlier detection. For example, 0.05 means it is assumed that 5% of values are real outliers and 95% are inliers. */ outlier_fraction?: double + /** If true, the following operation is performed on the columns before computing outlier scores: `(x_i - mean(x_i)) / sd(x_i)`. */ standardization_enabled?: boolean } export interface MlDataframeAnalysisRegression extends MlDataframeAnalysis { + /** The loss function used during regression. Available options are `mse` (mean squared error), `msle` (mean squared logarithmic error), `huber` (Pseudo-Huber loss). */ loss_function?: string + /** A positive number that is used as a parameter to the `loss_function`. */ loss_function_parameter?: double } export interface MlDataframeAnalytics { + /** An object containing information about the analysis job. */ analysis_stats?: MlDataframeAnalyticsStatsContainer + /** For running jobs only, contains messages relating to the selection of a node to run the job. */ assignment_explanation?: string + /** An object that provides counts for the quantity of documents skipped, used in training, or available for testing. 
*/ data_counts: MlDataframeAnalyticsStatsDataCounts + /** The unique identifier of the data frame analytics job. */ id: Id + /** An object describing memory usage of the analytics. It is present only after the job is started and memory usage is reported. */ memory_usage: MlDataframeAnalyticsStatsMemoryUsage + /** Contains properties for the node that runs the job. This information is available only for running jobs. + * @remarks This property is not supported on Elastic Cloud Serverless. */ node?: NodeAttributes + /** The progress report of the data frame analytics job by phase. */ progress: MlDataframeAnalyticsStatsProgress[] + /** The status of the data frame analytics job, which can be one of the following values: failed, started, starting, stopping, stopped. */ state: MlDataframeState } export interface MlDataframeAnalyticsAuthorization { + /** If an API key was used for the most recent update to the job, its name and identifier are listed in the response. */ api_key?: MlApiKeyAuthorization + /** If a user ID was used for the most recent update to the job, its roles at the time of the update are listed in the response. */ roles?: string[] + /** If a service account was used for the most recent update to the job, the account name is listed in the response. */ service_account?: string } export interface MlDataframeAnalyticsDestination { + /** Defines the destination index to store the results of the data frame analytics job. */ index: IndexName + /** Defines the name of the field in which to store the results of the analysis. Defaults to `ml`. */ results_field?: Field } export interface MlDataframeAnalyticsFieldSelection { + /** Whether the field is selected to be included in the analysis. */ is_included: boolean + /** Whether the field is required. */ is_required: boolean + /** The feature type of this field for the analysis. May be categorical or numerical. */ feature_type?: string + /** The mapping types of the field. */ mapping_types: string[] + /** The field name. */ name: Field + /** The reason a field is not selected to be included in the analysis. */ reason?: string } export interface MlDataframeAnalyticsMemoryEstimation { + /** Estimated memory usage under the assumption that overflowing to disk is allowed during data frame analytics. expected_memory_with_disk is usually smaller than expected_memory_without_disk as using disk allows to limit the main memory needed to perform data frame analytics. */ expected_memory_with_disk: string + /** Estimated memory usage under the assumption that the whole data frame analytics should happen in memory (i.e. without overflowing to disk). */ expected_memory_without_disk: string } export interface MlDataframeAnalyticsSource { + /** Index or indices on which to perform the analysis. It can be a single index or index pattern as well as an array of indices or patterns. NOTE: If your source indices contain documents with the same IDs, only the document that is indexed last appears in the destination index. */ index: Indices + /** The Elasticsearch query domain-specific language (DSL). This value corresponds to the query object in an Elasticsearch search POST body. All the options that are supported by Elasticsearch can be used, as this object is passed verbatim to Elasticsearch. By default, this property has the following value: {"match_all": {}}. */ query?: QueryDslQueryContainer + /** Definitions of runtime fields that will become part of the mapping of the destination index. 
*/ runtime_mappings?: MappingRuntimeFields + /** Specify `includes` and/or `excludes patterns to select which fields will be present in the destination. Fields that are excluded cannot be included in the analysis. */ _source?: MlDataframeAnalysisAnalyzedFields | string[] } export interface MlDataframeAnalyticsStatsContainer { + /** An object containing information about the classification analysis job. */ classification_stats?: MlDataframeAnalyticsStatsHyperparameters + /** An object containing information about the outlier detection job. */ outlier_detection_stats?: MlDataframeAnalyticsStatsOutlierDetection + /** An object containing information about the regression analysis. */ regression_stats?: MlDataframeAnalyticsStatsHyperparameters } export interface MlDataframeAnalyticsStatsDataCounts { + /** The number of documents that are skipped during the analysis because they contained values that are not supported by the analysis. For example, outlier detection does not support missing fields so it skips documents with missing fields. Likewise, all types of analysis skip documents that contain arrays with more than one element. */ skipped_docs_count: integer + /** The number of documents that are not used for training the model and can be used for testing. */ test_docs_count: integer + /** The number of documents that are used for training the model. */ training_docs_count: integer } export interface MlDataframeAnalyticsStatsHyperparameters { + /** An object containing the parameters of the classification analysis job. */ hyperparameters: MlHyperparameters + /** The number of iterations on the analysis. */ iteration: integer + /** The timestamp when the statistics were reported in milliseconds since the epoch. */ timestamp: EpochTime + /** An object containing time statistics about the data frame analytics job. */ timing_stats: MlTimingStats + /** An object containing information about validation loss. */ validation_loss: MlValidationLoss } export interface MlDataframeAnalyticsStatsMemoryUsage { + /** This value is present when the status is hard_limit and it is a new estimate of how much memory the job needs. */ memory_reestimate_bytes?: long + /** The number of bytes used at the highest peak of memory usage. */ peak_usage_bytes: long + /** The memory usage status. */ status: string + /** The timestamp when memory usage was calculated. */ timestamp?: EpochTime } export interface MlDataframeAnalyticsStatsOutlierDetection { + /** The list of job parameters specified by the user or determined by algorithmic heuristics. */ parameters: MlOutlierDetectionParameters + /** The timestamp when the statistics were reported in milliseconds since the epoch. */ timestamp: EpochTime + /** An object containing time statistics about the data frame analytics job. */ timing_stats: MlTimingStats } export interface MlDataframeAnalyticsStatsProgress { + /** Defines the phase of the data frame analytics job. */ phase: string + /** The progress that the data frame analytics job has made expressed in percentage. */ progress_percent: integer } @@ -16870,6 +24942,7 @@ export interface MlDataframeAnalyticsSummary { allow_lazy_start?: boolean analysis: MlDataframeAnalysisContainer analyzed_fields?: MlDataframeAnalysisAnalyzedFields | string[] + /** The security privileges that the job uses to run its queries. If Elastic Stack security features were disabled at the time of the most recent update to the job, this property is omitted. 
*/ authorization?: MlDataframeAnalyticsAuthorization create_time?: EpochTime description?: string @@ -16883,69 +24956,100 @@ export interface MlDataframeAnalyticsSummary { } export interface MlDataframeEvaluationClassification { + /** The field of the index which contains the ground truth. The data type of this field can be boolean or integer. If the data type is integer, the value has to be either 0 (false) or 1 (true). */ actual_field: Field + /** The field in the index which contains the predicted value, in other words the results of the classification analysis. */ predicted_field?: Field + /** The field of the index which is an array of documents of the form { "class_name": XXX, "class_probability": YYY }. This field must be defined as nested in the mappings. */ top_classes_field?: Field + /** Specifies the metrics that are used for the evaluation. */ metrics?: MlDataframeEvaluationClassificationMetrics } export interface MlDataframeEvaluationClassificationMetrics extends MlDataframeEvaluationMetrics { + /** Accuracy of predictions (per-class and overall). */ accuracy?: Record + /** Multiclass confusion matrix. */ multiclass_confusion_matrix?: Record } export interface MlDataframeEvaluationClassificationMetricsAucRoc { + /** Name of the only class that is treated as positive during AUC ROC calculation. Other classes are treated as negative ("one-vs-all" strategy). All the evaluated documents must have class_name in the list of their top classes. */ class_name?: Name + /** Whether or not the curve should be returned in addition to the score. Default value is false. */ include_curve?: boolean } export interface MlDataframeEvaluationContainer { + /** Classification evaluation evaluates the results of a classification analysis which outputs a prediction that identifies to which of the classes each document belongs. */ classification?: MlDataframeEvaluationClassification + /** Outlier detection evaluates the results of an outlier detection analysis which outputs the probability that each document is an outlier. */ outlier_detection?: MlDataframeEvaluationOutlierDetection + /** Regression evaluation evaluates the results of a regression analysis which outputs a prediction of values. */ regression?: MlDataframeEvaluationRegression } export interface MlDataframeEvaluationMetrics { + /** The AUC ROC (area under the curve of the receiver operating characteristic) score and optionally the curve. It is calculated for a specific class (provided as "class_name") treated as positive. */ auc_roc?: MlDataframeEvaluationClassificationMetricsAucRoc + /** Precision of predictions (per-class and average). */ precision?: Record + /** Recall of predictions (per-class and average). */ recall?: Record } export interface MlDataframeEvaluationOutlierDetection { + /** The field of the index which contains the ground truth. The data type of this field can be boolean or integer. If the data type is integer, the value has to be either 0 (false) or 1 (true). */ actual_field: Field + /** The field of the index that defines the probability of whether the item belongs to the class in question or not. It’s the field that contains the results of the analysis. */ predicted_probability_field: Field + /** Specifies the metrics that are used for the evaluation. */ metrics?: MlDataframeEvaluationOutlierDetectionMetrics } export interface MlDataframeEvaluationOutlierDetectionMetrics extends MlDataframeEvaluationMetrics { + /** Accuracy of predictions (per-class and overall). 
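
// --- Illustrative sketch (not part of the generated types in this diff): evaluating
// classification results with the MlDataframeEvaluationClassification options documented above.
// The destination index and field names are hypothetical and assume a completed classification job.
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

async function evaluateClassification () {
  const response = await client.ml.evaluateDataFrame({
    index: 'loan-applications-predictions',          // hypothetical destination index
    evaluation: {
      classification: {
        actual_field: 'defaulted',                   // ground truth field
        predicted_field: 'ml.defaulted_prediction',  // hypothetical results field
        metrics: {
          accuracy: {},                              // per-class and overall accuracy
          multiclass_confusion_matrix: {}            // confusion matrix across classes
        }
      }
    }
  })
  console.log(response.classification)
}
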
*/ confusion_matrix?: Record } export interface MlDataframeEvaluationRegression { + /** The field of the index which contains the ground truth. The data type of this field must be numerical. */ actual_field: Field + /** The field in the index that contains the predicted value, in other words the results of the regression analysis. */ predicted_field: Field + /** Specifies the metrics that are used for the evaluation. For more information on mse, msle, and huber, consult the Jupyter notebook on regression loss functions. */ metrics?: MlDataframeEvaluationRegressionMetrics } export interface MlDataframeEvaluationRegressionMetrics { + /** Average squared difference between the predicted values and the actual (ground truth) value. For more information, read this wiki article. */ mse?: Record + /** Average squared difference between the logarithm of the predicted values and the logarithm of the actual (ground truth) value. */ msle?: MlDataframeEvaluationRegressionMetricsMsle + /** Pseudo Huber loss function. */ huber?: MlDataframeEvaluationRegressionMetricsHuber + /** Proportion of the variance in the dependent variable that is predictable from the independent variables. */ r_squared?: Record } export interface MlDataframeEvaluationRegressionMetricsHuber { + /** Approximates 1/2 (prediction - actual)2 for values much less than delta and approximates a straight line with slope delta for values much larger than delta. Defaults to 1. Delta needs to be greater than 0. */ delta?: double } export interface MlDataframeEvaluationRegressionMetricsMsle { + /** Defines the transition point at which you switch from minimizing quadratic error to minimizing quadratic log error. Defaults to 1. */ offset?: double } export type MlDataframeState = 'started' | 'stopped' | 'starting' | 'stopping' | 'failed' export interface MlDelayedDataCheckConfig { + /** The window of time that is searched for late data. This window of time ends with the latest finalized bucket. + * It defaults to null, which causes an appropriate `check_window` to be calculated when the real-time datafeed runs. + * In particular, the default `check_window` span calculation is based on the maximum of `2h` or `8 * bucket_span`. */ check_window?: Duration + /** Specifies whether the datafeed periodically checks for delayed data. */ enabled: boolean } @@ -16954,40 +25058,82 @@ export type MlDeploymentAllocationState = 'started' | 'starting' | 'fully_alloca export type MlDeploymentAssignmentState = 'started' | 'starting' | 'stopping' | 'failed' export interface MlDetectionRule { + /** The set of actions to be triggered when the rule applies. If more than one action is specified the effects of all actions are combined. */ actions?: MlRuleAction[] + /** An array of numeric conditions when the rule applies. A rule must either have a non-empty scope or at least one condition. Multiple conditions are combined together with a logical AND. */ conditions?: MlRuleCondition[] + /** A scope of series where the rule applies. A rule must either have a non-empty scope or at least one condition. By default, the scope includes all series. Scoping is allowed for any of the fields that are also specified in `by_field_name`, `over_field_name`, or `partition_field_name`. */ scope?: Record } export interface MlDetector { + /** The field used to split the data. In particular, this property is used for analyzing the splits with respect to their own history. It is used for finding unusual values in the context of the split. 
*/ by_field_name?: Field + /** Custom rules enable you to customize the way detectors operate. For example, a rule may dictate conditions under which results should be skipped. Kibana refers to custom rules as job rules. */ custom_rules?: MlDetectionRule[] + /** A description of the detector. */ detector_description?: string + /** A unique identifier for the detector. This identifier is based on the order of the detectors in the `analysis_config`, starting at zero. If you specify a value for this property, it is ignored. */ detector_index?: integer + /** If set, frequent entities are excluded from influencing the anomaly results. Entities can be considered frequent over time or frequent in a population. If you are working with both over and by fields, you can set `exclude_frequent` to `all` for both fields, or to `by` or `over` for those specific fields. */ exclude_frequent?: MlExcludeFrequent + /** The field that the detector uses in the function. If you use an event rate function such as count or rare, do not specify this field. The `field_name` cannot contain double quotes or backslashes. */ field_name?: Field + /** The analysis function that is used. For example, `count`, `rare`, `mean`, `min`, `max`, or `sum`. */ function?: string + /** The field used to split the data. In particular, this property is used for analyzing the splits with respect to the history of all splits. It is used for finding unusual values in the population of all splits. */ over_field_name?: Field + /** The field used to segment the analysis. When you use this property, you have completely independent baselines for each value of this field. */ partition_field_name?: Field + /** Defines whether a new series is used as the null series when there is no value for the by or partition fields. */ use_null?: boolean } export interface MlDetectorRead { + /** The field used to split the data. + * In particular, this property is used for analyzing the splits with respect to their own history. + * It is used for finding unusual values in the context of the split. */ by_field_name?: Field + /** An array of custom rule objects, which enable you to customize the way detectors operate. + * For example, a rule may dictate to the detector conditions under which results should be skipped. + * Kibana refers to custom rules as job rules. */ custom_rules?: MlDetectionRule[] + /** A description of the detector. */ detector_description?: string + /** A unique identifier for the detector. + * This identifier is based on the order of the detectors in the `analysis_config`, starting at zero. */ detector_index?: integer + /** Contains one of the following values: `all`, `none`, `by`, or `over`. + * If set, frequent entities are excluded from influencing the anomaly results. + * Entities can be considered frequent over time or frequent in a population. + * If you are working with both over and by fields, then you can set `exclude_frequent` to all for both fields, or to `by` or `over` for those specific fields. */ exclude_frequent?: MlExcludeFrequent + /** The field that the detector uses in the function. + * If you use an event rate function such as `count` or `rare`, do not specify this field. */ field_name?: Field + /** The analysis function that is used. + * For example, `count`, `rare`, `mean`, `min`, `max`, and `sum`. */ function: string + /** The field used to split the data. + * In particular, this property is used for analyzing the splits with respect to the history of all splits. 
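
// --- Illustrative sketch (not part of the generated types in this diff): defining detectors
// (MlDetector) inside an anomaly detection job. The job id, bucket span, and field names are hypothetical.
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

async function createAnomalyJob () {
  await client.ml.putJob({
    job_id: 'web-traffic-anomalies',               // hypothetical job id
    analysis_config: {
      bucket_span: '15m',
      detectors: [
        {
          function: 'mean',                        // the analysis function
          field_name: 'response_time',             // field the function operates on
          by_field_name: 'host',                   // split the analysis per host
          detector_description: 'Mean response time by host'
        }
      ],
      influencers: ['host', 'client_ip']
    },
    data_description: { time_field: '@timestamp' }
  })
}
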
+ * It is used for finding unusual values in the population of all splits. */ over_field_name?: Field + /** The field used to segment the analysis. + * When you use this property, you have completely independent baselines for each value of this field. */ partition_field_name?: Field + /** Defines whether a new series is used as the null series when there is no value for the by or partition fields. */ use_null?: boolean } export interface MlDetectorUpdate { + /** A unique identifier for the detector. + * This identifier is based on the order of the detectors in the `analysis_config`, starting at zero. */ detector_index: integer + /** A description of the detector. */ description?: string + /** An array of custom rule objects, which enable you to customize the way detectors operate. + * For example, a rule may dictate to the detector conditions under which results should be skipped. + * Kibana refers to custom rules as job rules. */ custom_rules?: MlDetectionRule[] } @@ -17021,100 +25167,228 @@ export interface MlExponentialAverageCalculationContext { previous_exponential_average_ms?: DurationValue } +export type MlFeatureExtractor = MlQueryFeatureExtractor + export interface MlFillMaskInferenceOptions { + /** The string/token which will be removed from incoming documents and replaced with the inference prediction(s). + * In a response, this field contains the mask token for the specified model/tokenizer. Each model and tokenizer + * has a predefined mask token which cannot be changed. Thus, it is recommended not to set this value in requests. + * However, if this field is present in a request, its value must match the predefined value for that model/tokenizer, + * otherwise the request will fail. */ mask_token?: string + /** Specifies the number of top class predictions to return. Defaults to 0. */ num_top_classes?: integer + /** The tokenization options to update when inferring */ tokenization?: MlTokenizationConfigContainer + /** The field that is added to incoming documents to contain the inference prediction. Defaults to predicted_value. */ results_field?: string vocabulary: MlVocabulary } export interface MlFillMaskInferenceUpdateOptions { + /** Specifies the number of top class predictions to return. Defaults to 0. */ num_top_classes?: integer + /** The tokenization options to update when inferring */ tokenization?: MlNlpTokenizationUpdateOptions + /** The field that is added to incoming documents to contain the inference prediction. Defaults to predicted_value. */ results_field?: string } export interface MlFilter { + /** A description of the filter. */ description?: string + /** A string that uniquely identifies a filter. */ filter_id: Id + /** An array of strings which is the filter item list. */ items: string[] } export interface MlFilterRef { + /** The identifier for the filter. */ filter_id: Id + /** If set to `include`, the rule applies for values in the filter. If set to `exclude`, the rule applies for values not in the filter. */ filter_type?: MlFilterType } export type MlFilterType = 'include' | 'exclude' export interface MlGeoResults { + /** The actual value for the bucket formatted as a `geo_point`. */ actual_point?: string + /** The typical value for the bucket formatted as a `geo_point`. */ typical_point?: string } export interface MlHyperparameter { + /** A positive number showing how much the parameter influences the variation of the loss function. For hyperparameters with values that are not specified by the user but tuned during hyperparameter optimization. 
*/ absolute_importance?: double + /** Name of the hyperparameter. */ name: Name + /** A number between 0 and 1 showing the proportion of influence on the variation of the loss function among all tuned hyperparameters. For hyperparameters with values that are not specified by the user but tuned during hyperparameter optimization. */ relative_importance?: double + /** Indicates if the hyperparameter is specified by the user (true) or optimized (false). */ supplied: boolean + /** The value of the hyperparameter, either optimized or specified by the user. */ value: double } export interface MlHyperparameters { + /** Advanced configuration option. + * Machine learning uses loss guided tree growing, which means that the decision trees grow where the regularized loss decreases most quickly. + * This parameter affects loss calculations by acting as a multiplier of the tree depth. + * Higher alpha values result in shallower trees and faster training times. + * By default, this value is calculated during hyperparameter optimization. + * It must be greater than or equal to zero. */ alpha?: double + /** Advanced configuration option. + * Regularization parameter to prevent overfitting on the training data set. + * Multiplies an L2 regularization term which applies to leaf weights of the individual trees in the forest. + * A high lambda value causes training to favor small leaf weights. + * This behavior makes the prediction function smoother at the expense of potentially not being able to capture relevant relationships between the features and the dependent variable. + * A small lambda value results in large individual trees and slower training. + * By default, this value is calculated during hyperparameter optimization. + * It must be a nonnegative value. */ lambda?: double + /** Advanced configuration option. + * Regularization parameter to prevent overfitting on the training data set. + * Multiplies a linear penalty associated with the size of individual trees in the forest. + * A high gamma value causes training to prefer small trees. + * A small gamma value results in larger individual trees and slower training. + * By default, this value is calculated during hyperparameter optimization. + * It must be a nonnegative value. */ gamma?: double + /** Advanced configuration option. + * The shrinkage applied to the weights. + * Smaller values result in larger forests which have a better generalization error. + * However, larger forests cause slower training. + * By default, this value is calculated during hyperparameter optimization. + * It must be a value between `0.001` and `1`. */ eta?: double + /** Advanced configuration option. + * Specifies the rate at which `eta` increases for each new tree that is added to the forest. + * For example, a rate of 1.05 increases `eta` by 5% for each extra tree. + * By default, this value is calculated during hyperparameter optimization. + * It must be between `0.5` and `2`. */ eta_growth_rate_per_tree?: double + /** Advanced configuration option. + * Defines the fraction of features that will be used when selecting a random bag for each candidate split. + * By default, this value is calculated during hyperparameter optimization. */ feature_bag_fraction?: double + /** Advanced configuration option. + * Controls the fraction of data that is used to compute the derivatives of the loss function for tree training. + * A small value results in the use of a small fraction of the data. + * If this value is set to be less than 1, accuracy typically improves. 
+ * However, too small a value may result in poor convergence for the ensemble and so require more trees. + * By default, this value is calculated during hyperparameter optimization. + * It must be greater than zero and less than or equal to 1. */ downsample_factor?: double + /** If the algorithm fails to determine a non-trivial tree (more than a single leaf), this parameter determines how many of such consecutive failures are tolerated. + * Once the number of attempts exceeds the threshold, the forest training stops. */ max_attempts_to_add_tree?: integer + /** Advanced configuration option. + * A multiplier responsible for determining the maximum number of hyperparameter optimization steps in the Bayesian optimization procedure. + * The maximum number of steps is determined based on the number of undefined hyperparameters times the maximum optimization rounds per hyperparameter. + * By default, this value is calculated during hyperparameter optimization. */ max_optimization_rounds_per_hyperparameter?: integer + /** Advanced configuration option. + * Defines the maximum number of decision trees in the forest. + * The maximum value is 2000. + * By default, this value is calculated during hyperparameter optimization. */ max_trees?: integer + /** The maximum number of folds for the cross-validation procedure. */ num_folds?: integer + /** Determines the maximum number of splits for every feature that can occur in a decision tree when the tree is trained. */ num_splits_per_feature?: integer + /** Advanced configuration option. + * Machine learning uses loss guided tree growing, which means that the decision trees grow where the regularized loss decreases most quickly. + * This soft limit combines with the `soft_tree_depth_tolerance` to penalize trees that exceed the specified depth; the regularized loss increases quickly beyond this depth. + * By default, this value is calculated during hyperparameter optimization. + * It must be greater than or equal to 0. */ soft_tree_depth_limit?: integer + /** Advanced configuration option. + * This option controls how quickly the regularized loss increases when the tree depth exceeds `soft_tree_depth_limit`. + * By default, this value is calculated during hyperparameter optimization. + * It must be greater than or equal to 0.01. */ soft_tree_depth_tolerance?: double } export type MlInclude = 'definition' | 'feature_importance_baseline' | 'hyperparameters' | 'total_feature_importance' | 'definition_status' export interface MlInferenceConfigCreateContainer { + /** Regression configuration for inference. */ regression?: MlRegressionInferenceOptions + /** Classification configuration for inference. */ classification?: MlClassificationInferenceOptions + /** Text classification configuration for inference. */ text_classification?: MlTextClassificationInferenceOptions + /** Zeroshot classification configuration for inference. */ zero_shot_classification?: MlZeroShotClassificationInferenceOptions + /** Fill mask configuration for inference. */ fill_mask?: MlFillMaskInferenceOptions + learning_to_rank?: MlLearningToRankConfig + /** Named entity recognition configuration for inference. */ ner?: MlNerInferenceOptions + /** Pass through configuration for inference. */ pass_through?: MlPassThroughInferenceOptions + /** Text embedding configuration for inference. */ text_embedding?: MlTextEmbeddingInferenceOptions + /** Text expansion configuration for inference. */ text_expansion?: MlTextExpansionInferenceOptions + /** Question answering configuration for inference. 
*/ question_answering?: MlQuestionAnsweringInferenceOptions } export interface MlInferenceConfigUpdateContainer { + /** Regression configuration for inference. */ regression?: MlRegressionInferenceOptions + /** Classification configuration for inference. */ classification?: MlClassificationInferenceOptions + /** Text classification configuration for inference. */ text_classification?: MlTextClassificationInferenceUpdateOptions + /** Zeroshot classification configuration for inference. */ zero_shot_classification?: MlZeroShotClassificationInferenceUpdateOptions + /** Fill mask configuration for inference. */ fill_mask?: MlFillMaskInferenceUpdateOptions + /** Named entity recognition configuration for inference. */ ner?: MlNerInferenceUpdateOptions + /** Pass through configuration for inference. */ pass_through?: MlPassThroughInferenceUpdateOptions + /** Text embedding configuration for inference. */ text_embedding?: MlTextEmbeddingInferenceUpdateOptions + /** Text expansion configuration for inference. */ text_expansion?: MlTextExpansionInferenceUpdateOptions + /** Question answering configuration for inference */ question_answering?: MlQuestionAnsweringInferenceUpdateOptions } export interface MlInferenceResponseResult { + /** If the model is trained for named entity recognition (NER) tasks, the response contains the recognized entities. */ entities?: MlTrainedModelEntities[] + /** Indicates whether the input text was truncated to meet the model's maximum sequence length limit. This property + * is present only when it is true. */ is_truncated?: boolean + /** If the model is trained for a text classification or zero shot classification task, the response is the + * predicted class. + * For named entity recognition (NER) tasks, it contains the annotated text output. + * For fill mask tasks, it contains the top prediction for replacing the mask token. + * For text embedding tasks, it contains the raw numerical text embedding values. + * For regression models, its a numerical value + * For classification models, it may be an integer, double, boolean or string depending on prediction type */ predicted_value?: MlPredictedValue | MlPredictedValue[] + /** For fill mask tasks, the response contains the input text sequence with the mask token replaced by the predicted + * value. + * Additionally */ predicted_value_sequence?: string + /** Specifies a probability for the predicted value. */ prediction_probability?: double + /** Specifies a confidence score for the predicted value. */ prediction_score?: double + /** For fill mask, text classification, and zero shot classification tasks, the response contains a list of top + * class entries. */ top_classes?: MlTopClassEntry[] + /** If the request failed, the response contains the reason for the failure. */ warning?: string + /** The feature importance for the inference results. Relevant only for classification or regression models */ feature_importance?: MlTrainedModelInferenceFeatureImportance[] } @@ -17124,42 +25398,114 @@ export interface MlInfluence { } export interface MlInfluencer { + /** The length of the bucket in seconds. This value matches the bucket span that is specified in the job. */ bucket_span: DurationValue + /** A normalized score between 0-100, which is based on the probability of the influencer in this bucket aggregated + * across detectors. Unlike `initial_influencer_score`, this value is updated by a re-normalization process as new + * data is analyzed. */ influencer_score: double + /** The field name of the influencer. 
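
// --- Illustrative sketch (not part of the generated types in this diff): calling a deployed
// trained model and reading the MlInferenceResponseResult fields documented above. The model id
// and input text are hypothetical; a text classification model is assumed.
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

async function classifyText () {
  const response = await client.ml.inferTrainedModel({
    model_id: 'my-text-classification-model',     // hypothetical deployed model
    docs: [{ text_field: 'The shipment arrived two weeks late.' }],
    inference_config: {
      text_classification: { num_top_classes: 3 } // update options applied just for this call
    }
  })

  for (const result of response.inference_results) {
    console.log(result.predicted_value)           // predicted class
    console.log(result.prediction_probability)    // probability of the predicted value
    console.log(result.top_classes)               // top class entries, when requested
  }
}
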
*/ influencer_field_name: Field + /** The entity that influenced, contributed to, or was to blame for the anomaly. */ influencer_field_value: string + /** A normalized score between 0-100, which is based on the probability of the influencer aggregated across detectors. + * This is the initial value that was calculated at the time the bucket was processed. */ initial_influencer_score: double + /** If true, this is an interim result. In other words, the results are calculated based on partial input data. */ is_interim: boolean + /** Identifier for the anomaly detection job. */ job_id: Id + /** The probability that the influencer has this behavior, in the range 0 to 1. This value can be held to a high + * precision of over 300 decimal places, so the `influencer_score` is provided as a human-readable and friendly + * interpretation of this value. */ probability: double + /** Internal. This value is always set to `influencer`. */ result_type: string + /** The start time of the bucket for which these results were calculated. */ timestamp: EpochTime + /** Additional influencer properties are added, depending on the fields being analyzed. For example, if it’s + * analyzing `user_name` as an influencer, a field `user_name` is added to the result document. This + * information enables you to filter the anomaly results more easily. */ foo?: string } export interface MlJob { + /** Advanced configuration option. + * Specifies whether this job can open when there is insufficient machine learning node capacity for it to be immediately assigned to a node. */ allow_lazy_open: boolean + /** The analysis configuration, which specifies how to analyze the data. + * After you create a job, you cannot change the analysis configuration; all the properties are informational. */ analysis_config: MlAnalysisConfig + /** Limits can be applied for the resources required to hold the mathematical models in memory. + * These limits are approximate and can be set per job. + * They do not control the memory used by other processes, for example the Elasticsearch Java processes. */ analysis_limits?: MlAnalysisLimits + /** Advanced configuration option. + * The time between each periodic persistence of the model. + * The default value is a randomized value between 3 to 4 hours, which avoids all jobs persisting at exactly the same time. + * The smallest allowed value is 1 hour. */ background_persist_interval?: Duration blocked?: MlJobBlocked create_time?: DateTime + /** Advanced configuration option. + * Contains custom metadata about the job. */ custom_settings?: MlCustomSettings + /** Advanced configuration option, which affects the automatic removal of old model snapshots for this job. + * It specifies a period of time (in days) after which only the first snapshot per day is retained. + * This period is relative to the timestamp of the most recent snapshot for this job. + * Valid values range from 0 to `model_snapshot_retention_days`. */ daily_model_snapshot_retention_after_days?: long + /** The data description defines the format of the input data when you send data to the job by using the post data API. + * Note that when configuring a datafeed, these properties are automatically set. + * When data is received via the post data API, it is not stored in Elasticsearch. + * Only the results for anomaly detection are retained. */ data_description: MlDataDescription + /** The datafeed, which retrieves data from Elasticsearch for analysis by the job. + * You can associate only one datafeed with each anomaly detection job. 
*/ datafeed_config?: MlDatafeed + /** Indicates that the process of deleting the job is in progress but not yet completed. + * It is only reported when `true`. */ deleting?: boolean + /** A description of the job. */ description?: string + /** If the job closed or failed, this is the time the job finished, otherwise it is `null`. + * This property is informational; you cannot change its value. */ finished_time?: DateTime + /** A list of job groups. + * A job can belong to no groups or many. */ groups?: string[] + /** Identifier for the anomaly detection job. + * This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. + * It must start and end with alphanumeric characters. */ job_id: Id + /** Reserved for future use, currently set to `anomaly_detector`. */ job_type?: string + /** The machine learning configuration version number at which the the job was created. */ job_version?: VersionString + /** This advanced configuration option stores model information along with the results. + * It provides a more detailed view into anomaly detection. + * Model plot provides a simplified and indicative view of the model and its bounds. */ model_plot_config?: MlModelPlotConfig model_snapshot_id?: Id + /** Advanced configuration option, which affects the automatic removal of old model snapshots for this job. + * It specifies the maximum period of time (in days) that snapshots are retained. + * This period is relative to the timestamp of the most recent snapshot for this job. + * By default, snapshots ten days older than the newest snapshot are deleted. */ model_snapshot_retention_days: long + /** Advanced configuration option. + * The period over which adjustments to the score are applied, as new data is seen. + * The default value is the longer of 30 days or 100 `bucket_spans`. */ renormalization_window_days?: long + /** A text string that affects the name of the machine learning results index. + * The default value is `shared`, which generates an index named `.ml-anomalies-shared`. */ results_index_name: IndexName + /** Advanced configuration option. + * The period of time (in days) that results are retained. + * Age is calculated relative to the timestamp of the latest bucket result. + * If this property has a non-null value, once per day at 00:30 (server time), results that are the specified number of days older than the latest bucket result are deleted from Elasticsearch. + * The default value is null, which means all results are retained. + * Annotations generated by the system also count as results for retention purposes; they are deleted after the same number of days as results. + * Annotations added by users are retained forever. */ results_retention_days?: long } @@ -17171,22 +25517,66 @@ export interface MlJobBlocked { export type MlJobBlockedReason = 'delete' | 'reset' | 'revert' export interface MlJobConfig { + /** Advanced configuration option. Specifies whether this job can open when there is insufficient machine learning node capacity for it to be immediately assigned to a node. */ allow_lazy_open?: boolean + /** The analysis configuration, which specifies how to analyze the data. + * After you create a job, you cannot change the analysis configuration; all the properties are informational. */ analysis_config: MlAnalysisConfig + /** Limits can be applied for the resources required to hold the mathematical models in memory. + * These limits are approximate and can be set per job. 
+ * They do not control the memory used by other processes, for example the Elasticsearch Java processes. */ analysis_limits?: MlAnalysisLimits + /** Advanced configuration option. + * The time between each periodic persistence of the model. + * The default value is a randomized value between 3 to 4 hours, which avoids all jobs persisting at exactly the same time. + * The smallest allowed value is 1 hour. */ background_persist_interval?: Duration + /** Advanced configuration option. + * Contains custom metadata about the job. */ custom_settings?: MlCustomSettings + /** Advanced configuration option, which affects the automatic removal of old model snapshots for this job. + * It specifies a period of time (in days) after which only the first snapshot per day is retained. + * This period is relative to the timestamp of the most recent snapshot for this job. */ daily_model_snapshot_retention_after_days?: long + /** The data description defines the format of the input data when you send data to the job by using the post data API. + * Note that when configure a datafeed, these properties are automatically set. */ data_description: MlDataDescription + /** The datafeed, which retrieves data from Elasticsearch for analysis by the job. + * You can associate only one datafeed with each anomaly detection job. */ datafeed_config?: MlDatafeedConfig + /** A description of the job. */ description?: string + /** A list of job groups. A job can belong to no groups or many. */ groups?: string[] + /** Identifier for the anomaly detection job. + * This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. + * It must start and end with alphanumeric characters. */ job_id?: Id + /** Reserved for future use, currently set to `anomaly_detector`. */ job_type?: string + /** This advanced configuration option stores model information along with the results. + * It provides a more detailed view into anomaly detection. + * Model plot provides a simplified and indicative view of the model and its bounds. */ model_plot_config?: MlModelPlotConfig + /** Advanced configuration option, which affects the automatic removal of old model snapshots for this job. + * It specifies the maximum period of time (in days) that snapshots are retained. + * This period is relative to the timestamp of the most recent snapshot for this job. + * The default value is `10`, which means snapshots ten days older than the newest snapshot are deleted. */ model_snapshot_retention_days?: long + /** Advanced configuration option. + * The period over which adjustments to the score are applied, as new data is seen. + * The default value is the longer of 30 days or 100 `bucket_spans`. */ renormalization_window_days?: long + /** A text string that affects the name of the machine learning results index. + * The default value is `shared`, which generates an index named `.ml-anomalies-shared`. */ results_index_name?: IndexName + /** Advanced configuration option. + * The period of time (in days) that results are retained. + * Age is calculated relative to the timestamp of the latest bucket result. + * If this property has a non-null value, once per day at 00:30 (server time), results that are the specified number of days older than the latest bucket result are deleted from Elasticsearch. + * The default value is null, which means all results are retained. + * Annotations generated by the system also count as results for retention purposes; they are deleted after the same number of days as results. 
+ * Annotations added by users are retained forever. */ results_retention_days?: long } @@ -17209,15 +25599,30 @@ export interface MlJobStatistics { } export interface MlJobStats { + /** For open anomaly detection jobs only, contains messages relating to the selection of a node to run the job. */ assignment_explanation?: string + /** An object that describes the quantity of input to the job and any related error counts. + * The `data_count` values are cumulative for the lifetime of a job. + * If a model snapshot is reverted or old results are deleted, the job counts are not reset. */ data_counts: MlDataCounts + /** An object that provides statistical information about forecasts belonging to this job. + * Some statistics are omitted if no forecasts have been made. */ forecasts_stats: MlJobForecastStatistics + /** Identifier for the anomaly detection job. */ job_id: string + /** An object that provides information about the size and contents of the model. */ model_size_stats: MlModelSizeStats + /** Contains properties for the node that runs the job. + * This information is available only for open jobs. + * @remarks This property is not supported on Elastic Cloud Serverless. */ node?: MlDiscoveryNodeCompact + /** For open jobs only, the elapsed time for which the job has been open. */ open_time?: DateTime + /** The status of the anomaly detection job, which can be one of the following values: `closed`, `closing`, `failed`, `opened`, `opening`. */ state: MlJobState + /** An object that provides statistical information about timing aspect of this job. */ timing_stats: MlJobTimingStats + /** Indicates that the process of deleting the job is in progress but not yet completed. It is only reported when `true`. */ deleting?: boolean } @@ -17232,6 +25637,12 @@ export interface MlJobTimingStats { minimum_bucket_processing_time_ms?: DurationValue } +export interface MlLearningToRankConfig { + default_params?: Record + feature_extractors?: Record[] + num_top_feature_importance_values: integer +} + export type MlMemoryStatus = 'ok' | 'soft_limit' | 'hard_limit' export interface MlModelPackageConfig { @@ -17252,8 +25663,11 @@ export interface MlModelPackageConfig { } export interface MlModelPlotConfig { + /** If true, enables calculation and storage of the model change annotations for each entity that is being analyzed. */ annotations_enabled?: boolean + /** If true, enables calculation and storage of the model bounds for each entity that is being analyzed. */ enabled?: boolean + /** Limits data collection to this comma separated list of partition or by field values. If terms are not specified or it is an empty string, no filtering is applied. Wildcards are not supported. Only the specified terms can be viewed when using the Single Metric Viewer. */ terms?: Field } @@ -17283,15 +25697,25 @@ export interface MlModelSizeStats { } export interface MlModelSnapshot { + /** An optional description of the job. */ description?: string + /** A numerical character string that uniquely identifies the job that the snapshot was created for. */ job_id: Id + /** The timestamp of the latest processed record. */ latest_record_time_stamp?: integer + /** The timestamp of the latest bucket result. */ latest_result_time_stamp?: integer + /** The minimum version required to be able to restore the model snapshot. */ min_version: VersionString + /** Summary information describing the model. 
*/ model_size_stats?: MlModelSizeStats + /** If true, this snapshot will not be deleted during automatic cleanup of snapshots older than model_snapshot_retention_days. However, this snapshot will be deleted when the job is deleted. The default value is false. */ retain: boolean + /** For internal use only. */ snapshot_doc_count: long + /** A numerical character string that uniquely identifies the model snapshot. */ snapshot_id: Id + /** The creation timestamp for the snapshot. */ timestamp: long } @@ -17299,19 +25723,25 @@ export interface MlModelSnapshotUpgrade { job_id: Id snapshot_id: Id state: MlSnapshotUpgradeState + /** @remarks This property is not supported on Elastic Cloud Serverless. */ node: MlDiscoveryNode assignment_explanation: string } export interface MlNerInferenceOptions { + /** The tokenization options */ tokenization?: MlTokenizationConfigContainer + /** The field that is added to incoming documents to contain the inference prediction. Defaults to predicted_value. */ results_field?: string + /** The token classification labels. Must be IOB formatted tags */ classification_labels?: string[] vocabulary?: MlVocabulary } export interface MlNerInferenceUpdateOptions { + /** The tokenization options to update when inferring */ tokenization?: MlNlpTokenizationUpdateOptions + /** The field that is added to incoming documents to contain the inference prediction. Defaults to predicted_value. */ results_field?: string } @@ -17319,30 +25749,52 @@ export interface MlNlpBertTokenizationConfig extends MlCommonTokenizationConfig } export interface MlNlpRobertaTokenizationConfig extends MlCommonTokenizationConfig { + /** Should the tokenizer prefix input with a space character */ add_prefix_space?: boolean } export interface MlNlpTokenizationUpdateOptions { + /** Truncate options to apply */ truncate?: MlTokenizationTruncate + /** Span options to apply */ span?: integer } export interface MlOutlierDetectionParameters { + /** Specifies whether the feature influence calculation is enabled. */ compute_feature_influence?: boolean + /** The minimum outlier score that a document needs to have in order to calculate its feature influence score. + * Value range: 0-1 */ feature_influence_threshold?: double + /** The method that outlier detection uses. + * Available methods are `lof`, `ldof`, `distance_kth_nn`, `distance_knn`, and `ensemble`. + * The default value is ensemble, which means that outlier detection uses an ensemble of different methods and normalises and combines their individual outlier scores to obtain the overall outlier score. */ method?: string + /** Defines the value for how many nearest neighbors each method of outlier detection uses to calculate its outlier score. + * When the value is not set, different values are used for different ensemble members. + * This default behavior helps improve the diversity in the ensemble; only override it if you are confident that the value you choose is appropriate for the data set. */ n_neighbors?: integer + /** The proportion of the data set that is assumed to be outlying prior to outlier detection. + * For example, 0.05 means it is assumed that 5% of values are real outliers and 95% are inliers. */ outlier_fraction?: double + /** If `true`, the following operation is performed on the columns before computing outlier scores: (x_i - mean(x_i)) / sd(x_i). */ standardization_enabled?: boolean } export interface MlOverallBucket { + /** The length of the bucket in seconds. Matches the job with the longest bucket_span value. 
*/ bucket_span: DurationValue + /** If true, this is an interim result. In other words, the results are calculated based on partial input data. */ is_interim: boolean + /** An array of objects that contain the max_anomaly_score per job_id. */ jobs: MlOverallBucketJob[] + /** The top_n average of the maximum bucket anomaly_score per job. */ overall_score: double + /** Internal. This is always set to overall_bucket. */ result_type: string + /** The start time of the bucket for which these results were calculated. */ timestamp: EpochTime + /** The start time of the bucket for which these results were calculated. */ timestamp_string?: DateTime } @@ -17352,45 +25804,70 @@ export interface MlOverallBucketJob { } export interface MlPage { + /** Skips the specified number of items. */ from?: integer + /** Specifies the maximum number of items to obtain. */ size?: integer } export interface MlPassThroughInferenceOptions { + /** The tokenization options */ tokenization?: MlTokenizationConfigContainer + /** The field that is added to incoming documents to contain the inference prediction. Defaults to predicted_value. */ results_field?: string vocabulary?: MlVocabulary } export interface MlPassThroughInferenceUpdateOptions { + /** The tokenization options to update when inferring */ tokenization?: MlNlpTokenizationUpdateOptions + /** The field that is added to incoming documents to contain the inference prediction. Defaults to predicted_value. */ results_field?: string } export interface MlPerPartitionCategorization { + /** To enable this setting, you must also set the `partition_field_name` property to the same value in every detector that uses the keyword `mlcategory`. Otherwise, job creation fails. */ enabled?: boolean + /** This setting can be set to true only if per-partition categorization is enabled. If true, both categorization and subsequent anomaly detection stops for partitions where the categorization status changes to warn. This setting makes it viable to have a job where it is expected that categorization works well for some partitions but not others; you do not pay the cost of bad categorization forever in the partitions where it works badly. */ stop_on_warn?: boolean } export type MlPredictedValue = ScalarValue | ScalarValue[] +export interface MlQueryFeatureExtractor { + default_score?: float + feature_name: string + query: QueryDslQueryContainer +} + export interface MlQuestionAnsweringInferenceOptions { + /** Specifies the number of top class predictions to return. Defaults to 0. */ num_top_classes?: integer + /** The tokenization options to update when inferring */ tokenization?: MlTokenizationConfigContainer + /** The field that is added to incoming documents to contain the inference prediction. Defaults to predicted_value. */ results_field?: string + /** The maximum answer length to consider */ max_answer_length?: integer } export interface MlQuestionAnsweringInferenceUpdateOptions { + /** The question to answer given the inference context */ question: string + /** Specifies the number of top class predictions to return. Defaults to 0. */ num_top_classes?: integer + /** The tokenization options to update when inferring */ tokenization?: MlNlpTokenizationUpdateOptions + /** The field that is added to incoming documents to contain the inference prediction. Defaults to predicted_value. 
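Since `MlPage` carries only `from` and `size`, paginating results that accept it looks roughly like the sketch below (illustrative only; it reuses the `client` from the earlier sketch, the job id is a placeholder, and the get categories API is just one of the endpoints typed in this diff that accepts a `page` object).

// Walk category results 50 at a time using the MlPage from/size fields.
async function listCategories (jobId: string): Promise<void> {
  let from = 0
  const size = 50
  while (true) {
    const resp = await client.ml.getCategories({
      job_id: jobId,
      page: { from, size }
    })
    if (resp.categories.length === 0) break
    for (const category of resp.categories) {
      console.log(category.category_id)
    }
    from += size
  }
}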
*/ results_field?: string + /** The maximum answer length to consider for extraction */ max_answer_length?: integer } export interface MlRegressionInferenceOptions { + /** The field that is added to incoming documents to contain the inference prediction. Defaults to predicted_value. */ results_field?: Field + /** Specifies the maximum number of feature importance values per document. */ num_top_feature_importance_values?: integer } @@ -17399,67 +25876,97 @@ export type MlRoutingState = 'failed' | 'started' | 'starting' | 'stopped' | 'st export type MlRuleAction = 'skip_result' | 'skip_model_update' export interface MlRuleCondition { + /** Specifies the result property to which the condition applies. If your detector uses `lat_long`, `metric`, `rare`, or `freq_rare` functions, you can only specify conditions that apply to time. */ applies_to: MlAppliesTo + /** Specifies the condition operator. The available options are greater than, greater than or equals, less than, and less than or equals. */ operator: MlConditionOperator + /** The value that is compared against the `applies_to` field using the operator. */ value: double } export interface MlRunningStateSearchInterval { + /** The end time. */ end?: Duration + /** The end time as an epoch in milliseconds. */ end_ms: DurationValue + /** The start time. */ start?: Duration + /** The start time as an epoch in milliseconds. */ start_ms: DurationValue } export type MlSnapshotUpgradeState = 'loading_old_state' | 'saving_new_state' | 'stopped' | 'failed' export interface MlTextClassificationInferenceOptions { + /** Specifies the number of top class predictions to return. Defaults to 0. */ num_top_classes?: integer + /** The tokenization options */ tokenization?: MlTokenizationConfigContainer + /** The field that is added to incoming documents to contain the inference prediction. Defaults to predicted_value. */ results_field?: string + /** Classification labels to apply other than the stored labels. Must have the same dimensions as the default configured labels */ classification_labels?: string[] + vocabulary?: MlVocabulary } export interface MlTextClassificationInferenceUpdateOptions { + /** Specifies the number of top class predictions to return. Defaults to 0. */ num_top_classes?: integer + /** The tokenization options to update when inferring */ tokenization?: MlNlpTokenizationUpdateOptions + /** The field that is added to incoming documents to contain the inference prediction. Defaults to predicted_value. */ results_field?: string + /** Classification labels to apply other than the stored labels. Must have the same dimensions as the default configured labels */ classification_labels?: string[] } export interface MlTextEmbeddingInferenceOptions { + /** The number of dimensions in the embedding output */ embedding_size?: integer + /** The tokenization options */ tokenization?: MlTokenizationConfigContainer + /** The field that is added to incoming documents to contain the inference prediction. Defaults to predicted_value. */ results_field?: string vocabulary: MlVocabulary } export interface MlTextEmbeddingInferenceUpdateOptions { tokenization?: MlNlpTokenizationUpdateOptions + /** The field that is added to incoming documents to contain the inference prediction. Defaults to predicted_value. */ results_field?: string } export interface MlTextExpansionInferenceOptions { + /** The tokenization options */ tokenization?: MlTokenizationConfigContainer + /** The field that is added to incoming documents to contain the inference prediction.
Defaults to predicted_value. */ results_field?: string vocabulary: MlVocabulary } export interface MlTextExpansionInferenceUpdateOptions { tokenization?: MlNlpTokenizationUpdateOptions + /** The field that is added to incoming documents to contain the inference prediction. Defaults to predicted_value. */ results_field?: string } export interface MlTimingStats { + /** Runtime of the analysis in milliseconds. */ elapsed_time: DurationValue + /** Runtime of the latest iteration of the analysis in milliseconds. */ iteration_time?: DurationValue } export interface MlTokenizationConfigContainer { + /** Indicates BERT tokenization and its options */ bert?: MlNlpBertTokenizationConfig + /** Indicates BERT Japanese tokenization and its options */ bert_ja?: MlNlpBertTokenizationConfig + /** Indicates MPNET tokenization and its options */ mpnet?: MlNlpBertTokenizationConfig + /** Indicates RoBERTa tokenization and its options */ roberta?: MlNlpRobertaTokenizationConfig + xlm_roberta?: MlXlmRobertaTokenizationConfig } export type MlTokenizationTruncate = 'first' | 'second' | 'none' @@ -17471,131 +25978,218 @@ export interface MlTopClassEntry { } export interface MlTotalFeatureImportance { + /** The feature for which this importance was calculated. */ feature_name: Name + /** A collection of feature importance statistics related to the training data set for this particular feature. */ importance: MlTotalFeatureImportanceStatistics[] + /** If the trained model is a classification model, feature importance statistics are gathered per target class value. */ classes: MlTotalFeatureImportanceClass[] } export interface MlTotalFeatureImportanceClass { + /** The target class value. Could be a string, boolean, or number. */ class_name: Name + /** A collection of feature importance statistics related to the training data set for this particular feature. */ importance: MlTotalFeatureImportanceStatistics[] } export interface MlTotalFeatureImportanceStatistics { + /** The average magnitude of this feature across all the training data. This value is the average of the absolute values of the importance for this feature. */ mean_magnitude: double + /** The maximum importance value across all the training data for this feature. */ max: integer + /** The minimum importance value across all the training data for this feature. */ min: integer } export interface MlTrainedModelAssignment { adaptive_allocations?: MlAdaptiveAllocationsSettings | null + /** The overall assignment state. */ assignment_state: MlDeploymentAssignmentState max_assigned_allocations?: integer reason?: string + /** The allocation state for each node. */ routing_table: Record + /** The timestamp when the deployment started. */ start_time: DateTime task_parameters: MlTrainedModelAssignmentTaskParameters } +export interface MlTrainedModelAssignmentRoutingStateAndReason { + /** The reason for the current state. It is usually populated only when the + * `routing_state` is `failed`. */ + reason?: string + /** The current routing state. */ + routing_state: MlRoutingState +} + export interface MlTrainedModelAssignmentRoutingTable { + /** The reason for the current state. It is usually populated only when the + * `routing_state` is `failed`. */ reason?: string + /** The current routing state. */ routing_state: MlRoutingState + /** Current number of allocations. */ current_allocations: integer + /** Target number of allocations. 
*/ target_allocations: integer } export interface MlTrainedModelAssignmentTaskParameters { + /** The size of the trained model in bytes. */ model_bytes: ByteSize + /** The unique identifier for the trained model. */ model_id: Id + /** The unique identifier for the trained model deployment. */ deployment_id: Id + /** The size of the trained model cache. */ cache_size?: ByteSize + /** The total number of allocations this model is assigned across ML nodes. */ number_of_allocations: integer priority: MlTrainingPriority per_deployment_memory_bytes: ByteSize per_allocation_memory_bytes: ByteSize + /** Number of inference requests are allowed in the queue at a time. */ queue_capacity: integer + /** Number of threads per allocation. */ threads_per_allocation: integer } export interface MlTrainedModelConfig { + /** Identifier for the trained model. */ model_id: Id + /** The model type */ model_type?: MlTrainedModelType + /** A comma delimited string of tags. A trained model can have many tags, or none. */ tags: string[] + /** The Elasticsearch version number in which the trained model was created. */ version?: VersionString compressed_definition?: string + /** Information on the creator of the trained model. */ created_by?: string + /** The time when the trained model was created. */ create_time?: DateTime + /** Any field map described in the inference configuration takes precedence. */ default_field_map?: Record + /** The free-text description of the trained model. */ description?: string + /** The estimated heap usage in bytes to keep the trained model in memory. */ estimated_heap_memory_usage_bytes?: integer + /** The estimated number of operations to use the trained model. */ estimated_operations?: integer + /** True if the full model definition is present. */ fully_defined?: boolean + /** The default configuration for inference. This can be either a regression, classification, or one of the many NLP focused configurations. It must match the underlying definition.trained_model's target_type. For pre-packaged models such as ELSER the config is not required. */ inference_config?: MlInferenceConfigCreateContainer + /** The input field names for the model definition. */ input: MlTrainedModelConfigInput + /** The license level of the trained model. */ license_level?: string + /** An object containing metadata about the trained model. For example, models created by data frame analytics contain analysis_config and input objects. */ metadata?: MlTrainedModelConfigMetadata model_size_bytes?: ByteSize model_package?: MlModelPackageConfig location?: MlTrainedModelLocation + platform_architecture?: string prefix_strings?: MlTrainedModelPrefixStrings } export interface MlTrainedModelConfigInput { + /** An array of input field names for the model. */ field_names: Field[] } export interface MlTrainedModelConfigMetadata { model_aliases?: string[] + /** An object that contains the baseline for feature importance values. For regression analysis, it is a single value. For classification analysis, there is a value for each class. */ feature_importance_baseline?: Record + /** List of the available hyperparameters optimized during the fine_parameter_tuning phase as well as specified by the user. */ hyperparameters?: MlHyperparameter[] + /** An array of the total feature importance for each feature used from the training data set. This array of objects is returned if data frame analytics trained the model and the request includes total_feature_importance in the include request parameter. 
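The `MlTrainedModelConfig` shape above is what the get trained models API returns per model; a rough sketch of listing them follows (again assuming a `client` built as in the first example, and that the response exposes a `trained_model_configs` array as in the current client).

async function listModels (): Promise<void> {
  const resp = await client.ml.getTrainedModels()
  for (const model of resp.trained_model_configs) {
    // `tags` is always present; `model_type` may be omitted for older models
    console.log(model.model_id, model.model_type ?? 'unknown', model.tags.join(','))
  }
}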
*/ total_feature_importance?: MlTotalFeatureImportance[] } export interface MlTrainedModelDeploymentAllocationStatus { + /** The current number of nodes where the model is allocated. */ allocation_count: integer + /** The detailed allocation state related to the nodes. */ state: MlDeploymentAllocationState + /** The desired number of nodes for model allocation. */ target_allocation_count: integer } export interface MlTrainedModelDeploymentNodesStats { + /** The average time for each inference call to complete on this node. */ average_inference_time_ms?: DurationValue average_inference_time_ms_last_minute?: DurationValue + /** The average time for each inference call to complete on this node, excluding cache */ average_inference_time_ms_excluding_cache_hits?: DurationValue + /** The number of errors when evaluating the trained model. */ error_count?: integer + /** The total number of inference calls made against this node for this model. */ inference_count?: long inference_cache_hit_count?: long inference_cache_hit_count_last_minute?: long + /** The epoch time stamp of the last inference call for the model on this node. */ last_access?: EpochTime + /** Information pertaining to the node. + * @remarks This property is not supported on Elastic Cloud Serverless. */ node?: MlDiscoveryNode + /** The number of allocations assigned to this node. */ number_of_allocations?: integer + /** The number of inference requests queued to be processed. */ number_of_pending_requests?: integer peak_throughput_per_minute: long - rejection_execution_count?: integer - routing_state: MlTrainedModelAssignmentRoutingTable + /** The number of inference requests that were not processed because the queue was full. */ + rejected_execution_count?: integer + /** The current routing state and reason for the current routing state for this allocation. */ + routing_state: MlTrainedModelAssignmentRoutingStateAndReason + /** The epoch timestamp when the allocation started. */ start_time?: EpochTime + /** The number of threads used by each allocation during inference. */ threads_per_allocation?: integer throughput_last_minute: integer + /** The number of inference requests that timed out before being processed. */ timeout_count?: integer } export interface MlTrainedModelDeploymentStats { adaptive_allocations?: MlAdaptiveAllocationsSettings + /** The detailed allocation status for the deployment. */ allocation_status?: MlTrainedModelDeploymentAllocationStatus cache_size?: ByteSize + /** The unique identifier for the trained model deployment. */ deployment_id: Id + /** The sum of `error_count` for all nodes in the deployment. */ error_count?: integer + /** The sum of `inference_count` for all nodes in the deployment. */ inference_count?: integer + /** The unique identifier for the trained model. */ model_id: Id + /** The deployment stats for each node that currently has the model allocated. + * In serverless, stats are reported for a single unnamed virtual node. */ nodes: MlTrainedModelDeploymentNodesStats[] + /** The number of allocations requested. */ number_of_allocations?: integer peak_throughput_per_minute: long priority: MlTrainingPriority + /** The number of inference requests that can be queued before new requests are rejected. */ queue_capacity?: integer + /** The sum of `rejected_execution_count` for all nodes in the deployment. + * Individual nodes reject an inference request if the inference queue is full. + * The queue size is controlled by the `queue_capacity` setting in the start + * trained model deployment API. 
*/ rejected_execution_count?: integer + /** The reason for the current deployment state. Usually only populated when + * the model is not deployed to a node. */ reason?: string + /** The epoch timestamp when the deployment started. */ start_time: EpochTime + /** The overall state of the deployment. */ state?: MlDeploymentAssignmentState + /** The number of threads used by each allocation during inference. */ threads_per_allocation?: integer + /** The sum of `timeout_count` for all nodes in the deployment. */ timeout_count?: integer } @@ -17619,10 +26213,19 @@ export interface MlTrainedModelInferenceFeatureImportance { } export interface MlTrainedModelInferenceStats { + /** The number of times the model was loaded for inference and was not retrieved from the cache. + * If this number is close to the `inference_count`, the cache is not being appropriately used. + * This can be solved by increasing the cache size or its time-to-live (TTL). + * Refer to general machine learning settings for the appropriate settings. */ cache_miss_count: integer + /** The number of failures when using the model for inference. */ failure_count: integer + /** The total number of times the model has been called for inference. + * This is across all inference contexts, including all pipelines. */ inference_count: integer + /** The number of inference calls where all the training features for the model were missing. */ missing_all_fields_count: integer + /** The time when the statistics were last updated. */ timestamp: EpochTime } @@ -17635,21 +26238,33 @@ export interface MlTrainedModelLocationIndex { } export interface MlTrainedModelPrefixStrings { + /** String prepended to input at ingest */ ingest?: string + /** String prepended to input at search */ search?: string } export interface MlTrainedModelSizeStats { + /** The size of the model in bytes. */ model_size_bytes: ByteSize + /** The amount of memory required to load the model in bytes. */ required_native_memory_bytes: ByteSize } export interface MlTrainedModelStats { + /** A collection of deployment stats, which is present when the models are deployed. */ deployment_stats?: MlTrainedModelDeploymentStats + /** A collection of inference stats fields. */ inference_stats?: MlTrainedModelInferenceStats + /** A collection of ingest stats for the model across all nodes. + * The values are summations of the individual node statistics. + * The format matches the ingest section in the nodes stats API. */ ingest?: Record + /** The unique identifier of the trained model. */ model_id: Id + /** A collection of model size stats. */ model_size_stats: MlTrainedModelSizeStats + /** The number of ingest pipelines that currently refer to the model. */ pipeline_count: integer } @@ -17658,13 +26273,18 @@ export type MlTrainedModelType = 'tree_ensemble' | 'lang_ident' | 'pytorch' export type MlTrainingPriority = 'normal' | 'low' export interface MlTransformAuthorization { + /** If an API key was used for the most recent update to the transform, its name and identifier are listed in the response. */ api_key?: MlApiKeyAuthorization + /** If a user ID was used for the most recent update to the transform, its roles at the time of the update are listed in the response. */ roles?: string[] + /** If a service account was used for the most recent update to the transform, the account name is listed in the response. */ service_account?: string } export interface MlValidationLoss { + /** Validation loss values for every added decision tree during the forest growing procedure.
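For the deployment and inference statistics documented above, here is a minimal sketch of pulling them back through the client (same assumed `client`; the field names follow the interfaces in this diff, and the model id is a placeholder).

async function printModelStats (modelId: string): Promise<void> {
  const resp = await client.ml.getTrainedModelsStats({ model_id: modelId })
  for (const stats of resp.trained_model_stats) {
    console.log(stats.model_id, stats.pipeline_count)
    // deployment_stats is only present while the model is deployed
    console.log(stats.deployment_stats?.nodes.length ?? 0, 'allocated node(s)')
    console.log('cache misses:', stats.inference_stats?.cache_miss_count)
  }
}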
*/ fold_values: string[] + /** The type of the loss metric. For example, binomial_logistic. */ loss_type: string } @@ -17672,24 +26292,38 @@ export interface MlVocabulary { index: IndexName } +export interface MlXlmRobertaTokenizationConfig extends MlCommonTokenizationConfig { +} + export interface MlZeroShotClassificationInferenceOptions { + /** The tokenization options to update when inferring */ tokenization?: MlTokenizationConfigContainer + /** Hypothesis template used when tokenizing labels for prediction */ hypothesis_template?: string + /** The zero shot classification labels indicating entailment, neutral, and contradiction + * Must contain exactly and only entailment, neutral, and contradiction */ classification_labels: string[] + /** The field that is added to incoming documents to contain the inference prediction. Defaults to predicted_value. */ results_field?: string + /** Indicates if more than one true label exists. */ multi_label?: boolean + /** The labels to predict. */ labels?: string[] } export interface MlZeroShotClassificationInferenceUpdateOptions { + /** The tokenization options to update when inferring */ tokenization?: MlNlpTokenizationUpdateOptions + /** The field that is added to incoming documents to contain the inference prediction. Defaults to predicted_value. */ results_field?: string + /** Update the configured multi label option. Indicates if more than one true label exists. Defaults to the configured value. */ multi_label?: boolean + /** The labels to predict. */ labels: string[] } export interface MlClearTrainedModelDeploymentCacheRequest extends RequestBase { -/** The unique identifier of the trained model. */ + /** The unique identifier of the trained model. */ model_id: Id /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { model_id?: never } @@ -17702,7 +26336,7 @@ export interface MlClearTrainedModelDeploymentCacheResponse { } export interface MlCloseJobRequest extends RequestBase { -/** Identifier for the anomaly detection job. It can be a job identifier, a group name, or a wildcard expression. You can close multiple anomaly detection jobs in a single API request by using a group name, a comma-separated list of jobs, or a wildcard expression. You can close all jobs by using `_all` or by specifying `*` as the job identifier. */ + /** Identifier for the anomaly detection job. It can be a job identifier, a group name, or a wildcard expression. You can close multiple anomaly detection jobs in a single API request by using a group name, a comma-separated list of jobs, or a wildcard expression. You can close all jobs by using `_all` or by specifying `*` as the job identifier. */ job_id: Id /** Refer to the description for the `allow_no_match` query parameter. */ allow_no_match?: boolean @@ -17721,7 +26355,7 @@ export interface MlCloseJobResponse { } export interface MlDeleteCalendarRequest extends RequestBase { -/** A string that uniquely identifies a calendar. */ + /** A string that uniquely identifies a calendar. */ calendar_id: Id /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { calendar_id?: never } @@ -17732,9 +26366,10 @@ export interface MlDeleteCalendarRequest extends RequestBase { export type MlDeleteCalendarResponse = AcknowledgedResponseBase export interface MlDeleteCalendarEventRequest extends RequestBase { -/** A string that uniquely identifies a calendar. */ + /** A string that uniquely identifies a calendar. 
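To show where `MlZeroShotClassificationInferenceUpdateOptions` is used in practice, here is a hedged sketch of an infer call that overrides the labels at request time; the model id, the document field name (`text_field`), and the labels are placeholders rather than values taken from this diff.

async function classify (text: string): Promise<void> {
  const resp = await client.ml.inferTrainedModel({
    model_id: 'my-zero-shot-model',   // placeholder model id
    docs: [{ text_field: text }],     // field name must match the model's configured input field
    inference_config: {
      zero_shot_classification: {
        labels: ['billing', 'bug report', 'feature request'],
        multi_label: false
      }
    }
  })
  console.log(resp.inference_results[0]?.predicted_value)
}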
*/ calendar_id: Id - /** Identifier for the scheduled event. You can obtain this identifier by using the get calendar events API. */ + /** Identifier for the scheduled event. + * You can obtain this identifier by using the get calendar events API. */ event_id: Id /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { calendar_id?: never, event_id?: never } @@ -17745,9 +26380,10 @@ export interface MlDeleteCalendarEventRequest extends RequestBase { export type MlDeleteCalendarEventResponse = AcknowledgedResponseBase export interface MlDeleteCalendarJobRequest extends RequestBase { -/** A string that uniquely identifies a calendar. */ + /** A string that uniquely identifies a calendar. */ calendar_id: Id - /** An identifier for the anomaly detection jobs. It can be a job identifier, a group name, or a comma-separated list of jobs or groups. */ + /** An identifier for the anomaly detection jobs. It can be a job identifier, a group name, or a + * comma-separated list of jobs or groups. */ job_id: Ids /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { calendar_id?: never, job_id?: never } @@ -17756,13 +26392,16 @@ export interface MlDeleteCalendarJobRequest extends RequestBase { } export interface MlDeleteCalendarJobResponse { + /** A string that uniquely identifies a calendar. */ calendar_id: Id + /** A description of the calendar. */ description?: string + /** A list of anomaly detection job identifiers or group names. */ job_ids: Ids } export interface MlDeleteDataFrameAnalyticsRequest extends RequestBase { -/** Identifier for the data frame analytics job. */ + /** Identifier for the data frame analytics job. */ id: Id /** If `true`, it deletes a job that is not stopped; this method is quicker than stopping and deleting the job. */ force?: boolean @@ -17777,9 +26416,13 @@ export interface MlDeleteDataFrameAnalyticsRequest extends RequestBase { export type MlDeleteDataFrameAnalyticsResponse = AcknowledgedResponseBase export interface MlDeleteDatafeedRequest extends RequestBase { -/** A numerical character string that uniquely identifies the datafeed. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. It must start and end with alphanumeric characters. */ + /** A numerical character string that uniquely identifies the datafeed. This + * identifier can contain lowercase alphanumeric characters (a-z and 0-9), + * hyphens, and underscores. It must start and end with alphanumeric + * characters. */ datafeed_id: Id - /** Use to forcefully delete a started datafeed; this method is quicker than stopping and deleting the datafeed. */ + /** Use to forcefully delete a started datafeed; this method is quicker than + * stopping and deleting the datafeed. */ force?: boolean /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { datafeed_id?: never, force?: never } @@ -17790,9 +26433,11 @@ export interface MlDeleteDatafeedRequest extends RequestBase { export type MlDeleteDatafeedResponse = AcknowledgedResponseBase export interface MlDeleteExpiredDataRequest extends RequestBase { -/** Identifier for an anomaly detection job. It can be a job identifier, a group name, or a wildcard expression. */ + /** Identifier for an anomaly detection job. It can be a job identifier, a + * group name, or a wildcard expression. */ job_id?: Id - /** The desired requests per second for the deletion processes. 
The default behavior is no throttling. */ + /** The desired requests per second for the deletion processes. The default + * behavior is no throttling. */ requests_per_second?: float /** How long can the underlying delete processes run until they are canceled. */ timeout?: Duration @@ -17807,7 +26452,7 @@ export interface MlDeleteExpiredDataResponse { } export interface MlDeleteFilterRequest extends RequestBase { -/** A string that uniquely identifies a filter. */ + /** A string that uniquely identifies a filter. */ filter_id: Id /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { filter_id?: never } @@ -17818,13 +26463,20 @@ export interface MlDeleteFilterRequest extends RequestBase { export type MlDeleteFilterResponse = AcknowledgedResponseBase export interface MlDeleteForecastRequest extends RequestBase { -/** Identifier for the anomaly detection job. */ + /** Identifier for the anomaly detection job. */ job_id: Id - /** A comma-separated list of forecast identifiers. If you do not specify this optional parameter or if you specify `_all` or `*` the API deletes all forecasts from the job. */ + /** A comma-separated list of forecast identifiers. If you do not specify + * this optional parameter or if you specify `_all` or `*` the API deletes + * all forecasts from the job. */ forecast_id?: Id - /** Specifies whether an error occurs when there are no forecasts. In particular, if this parameter is set to `false` and there are no forecasts associated with the job, attempts to delete all forecasts return an error. */ + /** Specifies whether an error occurs when there are no forecasts. In + * particular, if this parameter is set to `false` and there are no + * forecasts associated with the job, attempts to delete all forecasts + * return an error. */ allow_no_forecasts?: boolean - /** Specifies the period of time to wait for the completion of the delete operation. When this period of time elapses, the API fails and returns an error. */ + /** Specifies the period of time to wait for the completion of the delete + * operation. When this period of time elapses, the API fails and returns an + * error. */ timeout?: Duration /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { job_id?: never, forecast_id?: never, allow_no_forecasts?: never, timeout?: never } @@ -17835,13 +26487,17 @@ export interface MlDeleteForecastRequest extends RequestBase { export type MlDeleteForecastResponse = AcknowledgedResponseBase export interface MlDeleteJobRequest extends RequestBase { -/** Identifier for the anomaly detection job. */ + /** Identifier for the anomaly detection job. */ job_id: Id - /** Use to forcefully delete an opened job; this method is quicker than closing and deleting the job. */ + /** Use to forcefully delete an opened job; this method is quicker than + * closing and deleting the job. */ force?: boolean - /** Specifies whether annotations that have been added by the user should be deleted along with any auto-generated annotations when the job is reset. */ + /** Specifies whether annotations that have been added by the + * user should be deleted along with any auto-generated annotations when the job is + * reset. */ delete_user_annotations?: boolean - /** Specifies whether the request should return immediately or wait until the job deletion completes. */ + /** Specifies whether the request should return immediately or wait until the + * job deletion completes. 
*/ wait_for_completion?: boolean /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { job_id?: never, force?: never, delete_user_annotations?: never, wait_for_completion?: never } @@ -17852,7 +26508,7 @@ export interface MlDeleteJobRequest extends RequestBase { export type MlDeleteJobResponse = AcknowledgedResponseBase export interface MlDeleteModelSnapshotRequest extends RequestBase { -/** Identifier for the anomaly detection job. */ + /** Identifier for the anomaly detection job. */ job_id: Id /** Identifier for the model snapshot. */ snapshot_id: Id @@ -17865,7 +26521,7 @@ export interface MlDeleteModelSnapshotRequest extends RequestBase { export type MlDeleteModelSnapshotResponse = AcknowledgedResponseBase export interface MlDeleteTrainedModelRequest extends RequestBase { -/** The unique identifier of the trained model. */ + /** The unique identifier of the trained model. */ model_id: Id /** Forcefully deletes a trained model that is referenced by ingest pipelines or has a started deployment. */ force?: boolean @@ -17880,7 +26536,7 @@ export interface MlDeleteTrainedModelRequest extends RequestBase { export type MlDeleteTrainedModelResponse = AcknowledgedResponseBase export interface MlDeleteTrainedModelAliasRequest extends RequestBase { -/** The model alias to delete. */ + /** The model alias to delete. */ model_alias: Name /** The trained model ID to which the model alias refers. */ model_id: Id @@ -17893,11 +26549,22 @@ export interface MlDeleteTrainedModelAliasRequest extends RequestBase { export type MlDeleteTrainedModelAliasResponse = AcknowledgedResponseBase export interface MlEstimateModelMemoryRequest extends RequestBase { -/** For a list of the properties that you can specify in the `analysis_config` component of the body of this API. */ + /** For a list of the properties that you can specify in the + * `analysis_config` component of the body of this API. */ analysis_config?: MlAnalysisConfig - /** Estimates of the highest cardinality in a single bucket that is observed for influencer fields over the time period that the job analyzes data. To produce a good answer, values must be provided for all influencer fields. Providing values for fields that are not listed as `influencers` has no effect on the estimation. */ + /** Estimates of the highest cardinality in a single bucket that is observed + * for influencer fields over the time period that the job analyzes data. + * To produce a good answer, values must be provided for all influencer + * fields. Providing values for fields that are not listed as `influencers` + * has no effect on the estimation. */ max_bucket_cardinality?: Record - /** Estimates of the cardinality that is observed for fields over the whole time period that the job analyzes data. To produce a good answer, values must be provided for fields referenced in the `by_field_name`, `over_field_name` and `partition_field_name` of any detectors. Providing values for other fields has no effect on the estimation. It can be omitted from the request if no detectors have a `by_field_name`, `over_field_name` or `partition_field_name`. */ + /** Estimates of the cardinality that is observed for fields over the whole + * time period that the job analyzes data. To produce a good answer, values + * must be provided for fields referenced in the `by_field_name`, + * `over_field_name` and `partition_field_name` of any detectors. Providing + * values for other fields has no effect on the estimation. 
It can be + * omitted from the request if no detectors have a `by_field_name`, + * `over_field_name` or `partition_field_name`. */ overall_cardinality?: Record /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { analysis_config?: never, max_bucket_cardinality?: never, overall_cardinality?: never } @@ -17922,17 +26589,27 @@ export interface MlEvaluateDataFrameConfusionMatrixPrediction { } export interface MlEvaluateDataFrameConfusionMatrixThreshold { + /** True Positive */ tp: integer + /** False Positive */ fp: integer + /** True Negative */ tn: integer + /** False Negative */ fn: integer } export interface MlEvaluateDataFrameDataframeClassificationSummary { + /** The AUC ROC (area under the curve of the receiver operating characteristic) score and optionally the curve. + * It is calculated for a specific class (provided as "class_name") treated as positive. */ auc_roc?: MlEvaluateDataFrameDataframeEvaluationSummaryAucRoc + /** Accuracy of predictions (per-class and overall). */ accuracy?: MlEvaluateDataFrameDataframeClassificationSummaryAccuracy + /** Multiclass confusion matrix. */ multiclass_confusion_matrix?: MlEvaluateDataFrameDataframeClassificationSummaryMulticlassConfusionMatrix + /** Precision of predictions (per-class and average). */ precision?: MlEvaluateDataFrameDataframeClassificationSummaryPrecision + /** Recall of predictions (per-class and average). */ recall?: MlEvaluateDataFrameDataframeClassificationSummaryRecall } @@ -17975,21 +26652,29 @@ export interface MlEvaluateDataFrameDataframeEvaluationValue { } export interface MlEvaluateDataFrameDataframeOutlierDetectionSummary { + /** The AUC ROC (area under the curve of the receiver operating characteristic) score and optionally the curve. */ auc_roc?: MlEvaluateDataFrameDataframeEvaluationSummaryAucRoc + /** Set the different thresholds of the outlier score at where the metric is calculated. */ precision?: Record + /** Set the different thresholds of the outlier score at where the metric is calculated. */ recall?: Record + /** Set the different thresholds of the outlier score at where the metrics (`tp` - true positive, `fp` - false positive, `tn` - true negative, `fn` - false negative) are calculated. */ confusion_matrix?: Record } export interface MlEvaluateDataFrameDataframeRegressionSummary { + /** Pseudo Huber loss function. */ huber?: MlEvaluateDataFrameDataframeEvaluationValue + /** Average squared difference between the predicted values and the actual (`ground truth`) value. */ mse?: MlEvaluateDataFrameDataframeEvaluationValue + /** Average squared difference between the logarithm of the predicted values and the logarithm of the actual (`ground truth`) value. */ msle?: MlEvaluateDataFrameDataframeEvaluationValue + /** Proportion of the variance in the dependent variable that is predictable from the independent variables. */ r_squared?: MlEvaluateDataFrameDataframeEvaluationValue } export interface MlEvaluateDataFrameRequest extends RequestBase { -/** Defines the type of evaluation you want to perform. */ + /** Defines the type of evaluation you want to perform. */ evaluation: MlDataframeEvaluationContainer /** Defines the `index` in which the evaluation will be performed. */ index: IndexName @@ -18002,29 +26687,52 @@ export interface MlEvaluateDataFrameRequest extends RequestBase { } export interface MlEvaluateDataFrameResponse { + /** Evaluation results for a classification analysis. 
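The estimate model memory request described above takes the same `analysis_config` as an anomaly detection job plus optional cardinality hints; a small sketch (detector, field names, and cardinalities are illustrative, and it reuses the earlier `client`).

async function estimate (): Promise<void> {
  const resp = await client.ml.estimateModelMemory({
    analysis_config: {
      bucket_span: '15m',
      detectors: [{ function: 'mean', field_name: 'responsetime', by_field_name: 'airline' }],
      influencers: ['airline']
    },
    // cardinality hint for the by_field referenced in the detector
    overall_cardinality: { airline: 50 }
  })
  console.log(resp.model_memory_estimate) // e.g. "21mb"
}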
+ * It outputs a prediction that identifies to which of the classes each document belongs. */ classification?: MlEvaluateDataFrameDataframeClassificationSummary + /** Evaluation results for an outlier detection analysis. + * It outputs the probability that each document is an outlier. */ outlier_detection?: MlEvaluateDataFrameDataframeOutlierDetectionSummary + /** Evaluation results for a regression analysis which outputs a prediction of values. */ regression?: MlEvaluateDataFrameDataframeRegressionSummary } export interface MlExplainDataFrameAnalyticsRequest extends RequestBase { -/** Identifier for the data frame analytics job. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. It must start and end with alphanumeric characters. */ + /** Identifier for the data frame analytics job. This identifier can contain + * lowercase alphanumeric characters (a-z and 0-9), hyphens, and + * underscores. It must start and end with alphanumeric characters. */ id?: Id - /** The configuration of how to source the analysis data. It requires an index. Optionally, query and _source may be specified. */ + /** The configuration of how to source the analysis data. It requires an + * index. Optionally, query and _source may be specified. */ source?: MlDataframeAnalyticsSource - /** The destination configuration, consisting of index and optionally results_field (ml by default). */ + /** The destination configuration, consisting of index and optionally + * results_field (ml by default). */ dest?: MlDataframeAnalyticsDestination - /** The analysis configuration, which contains the information necessary to perform one of the following types of analysis: classification, outlier detection, or regression. */ + /** The analysis configuration, which contains the information necessary to + * perform one of the following types of analysis: classification, outlier + * detection, or regression. */ analysis?: MlDataframeAnalysisContainer /** A description of the job. */ description?: string - /** The approximate maximum amount of memory resources that are permitted for analytical processing. If your `elasticsearch.yml` file contains an `xpack.ml.max_model_memory_limit` setting, an error occurs when you try to create data frame analytics jobs that have `model_memory_limit` values greater than that setting. */ + /** The approximate maximum amount of memory resources that are permitted for + * analytical processing. If your `elasticsearch.yml` file contains an + * `xpack.ml.max_model_memory_limit` setting, an error occurs when you try to + * create data frame analytics jobs that have `model_memory_limit` values + * greater than that setting. */ model_memory_limit?: string - /** The maximum number of threads to be used by the analysis. Using more threads may decrease the time necessary to complete the analysis at the cost of using more CPU. Note that the process may use additional threads for operational functionality other than the analysis itself. */ + /** The maximum number of threads to be used by the analysis. Using more + * threads may decrease the time necessary to complete the analysis at the + * cost of using more CPU. Note that the process may use additional threads + * for operational functionality other than the analysis itself. */ max_num_threads?: integer - /** Specify includes and/or excludes patterns to select which fields will be included in the analysis. The patterns specified in excludes are applied last, therefore excludes takes precedence. 
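Tying the evaluation response types above to an actual call, here is a sketch of evaluating an outlier detection results index; the index name and field paths are placeholders, and the response fields follow the summary interfaces in this diff.

async function evaluateOutliers (): Promise<void> {
  const resp = await client.ml.evaluateDataFrame({
    index: 'my-outlier-results',
    evaluation: {
      outlier_detection: {
        actual_field: 'is_outlier',
        predicted_probability_field: 'ml.outlier_score'
      }
    }
  })
  // AUC ROC is only present when requested/applicable
  console.log(resp.outlier_detection?.auc_roc?.value)
}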
In other words, if the same field is specified in both includes and excludes, then the field will not be included in the analysis. */ + /** Specify includes and/or excludes patterns to select which fields will be + * included in the analysis. The patterns specified in excludes are applied + * last, therefore excludes takes precedence. In other words, if the same + * field is specified in both includes and excludes, then the field will not + * be included in the analysis. */ analyzed_fields?: MlDataframeAnalysisAnalyzedFields | string[] - /** Specifies whether this job can start when there is insufficient machine learning node capacity for it to be immediately assigned to a node. */ + /** Specifies whether this job can start when there is insufficient machine + * learning node capacity for it to be immediately assigned to a node. */ allow_lazy_start?: boolean /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { id?: never, source?: never, dest?: never, analysis?: never, description?: never, model_memory_limit?: never, max_num_threads?: never, analyzed_fields?: never, allow_lazy_start?: never } @@ -18033,12 +26741,14 @@ export interface MlExplainDataFrameAnalyticsRequest extends RequestBase { } export interface MlExplainDataFrameAnalyticsResponse { + /** An array of objects that explain selection for each field, sorted by the field names. */ field_selection: MlDataframeAnalyticsFieldSelection[] + /** An array of objects that explain selection for each field, sorted by the field names. */ memory_estimation: MlDataframeAnalyticsMemoryEstimation } export interface MlFlushJobRequest extends RequestBase { -/** Identifier for the anomaly detection job. */ + /** Identifier for the anomaly detection job. */ job_id: Id /** Refer to the description for the `advance_time` query parameter. */ advance_time?: DateTime @@ -18058,11 +26768,14 @@ export interface MlFlushJobRequest extends RequestBase { export interface MlFlushJobResponse { flushed: boolean + /** Provides the timestamp (in milliseconds since the epoch) of the end of + * the last bucket that was processed. */ last_finalized_bucket_end?: integer } export interface MlForecastRequest extends RequestBase { -/** Identifier for the anomaly detection job. The job must be open when you create a forecast; otherwise, an error occurs. */ + /** Identifier for the anomaly detection job. The job must be open when you + * create a forecast; otherwise, an error occurs. */ job_id: Id /** Refer to the description for the `duration` query parameter. */ duration?: Duration @@ -18082,9 +26795,10 @@ export interface MlForecastResponse { } export interface MlGetBucketsRequest extends RequestBase { -/** Identifier for the anomaly detection job. */ + /** Identifier for the anomaly detection job. */ job_id: Id - /** The timestamp of a single bucket result. If you do not specify this parameter, the API returns information about all buckets. */ + /** The timestamp of a single bucket result. If you do not specify this + * parameter, the API returns information about all buckets. */ timestamp?: DateTime /** Skips the specified number of buckets. */ from?: integer @@ -18117,7 +26831,7 @@ export interface MlGetBucketsResponse { } export interface MlGetCalendarEventsRequest extends RequestBase { -/** A string that uniquely identifies a calendar. You can get information for multiple calendars by using a comma-separated list of ids or a wildcard expression. 
You can get information for all calendars by using `_all` or `*` or by omitting the calendar identifier. */ + /** A string that uniquely identifies a calendar. You can get information for multiple calendars by using a comma-separated list of ids or a wildcard expression. You can get information for all calendars by using `_all` or `*` or by omitting the calendar identifier. */ calendar_id: Id /** Specifies to get events with timestamps earlier than this time. */ end?: DateTime @@ -18141,13 +26855,16 @@ export interface MlGetCalendarEventsResponse { } export interface MlGetCalendarsCalendar { + /** A string that uniquely identifies a calendar. */ calendar_id: Id + /** A description of the calendar. */ description?: string + /** An array of anomaly detection job identifiers. */ job_ids: Id[] } export interface MlGetCalendarsRequest extends RequestBase { -/** A string that uniquely identifies a calendar. You can get information for multiple calendars by using a comma-separated list of ids or a wildcard expression. You can get information for all calendars by using `_all` or `*` or by omitting the calendar identifier. */ + /** A string that uniquely identifies a calendar. You can get information for multiple calendars by using a comma-separated list of ids or a wildcard expression. You can get information for all calendars by using `_all` or `*` or by omitting the calendar identifier. */ calendar_id?: Id /** Skips the specified number of calendars. This parameter is supported only when you omit the calendar identifier. */ from?: integer @@ -18167,9 +26884,13 @@ export interface MlGetCalendarsResponse { } export interface MlGetCategoriesRequest extends RequestBase { -/** Identifier for the anomaly detection job. */ + /** Identifier for the anomaly detection job. */ job_id: Id - /** Identifier for the category, which is unique in the job. If you specify neither the category ID nor the partition_field_value, the API returns information about all categories. If you specify only the partition_field_value, it returns information about all categories for the specified partition. */ + /** Identifier for the category, which is unique in the job. If you specify + * neither the category ID nor the partition_field_value, the API returns + * information about all categories. If you specify only the + * partition_field_value, it returns information about all categories for + * the specified partition. */ category_id?: CategoryId /** Skips the specified number of categories. */ from?: integer @@ -18177,7 +26898,8 @@ export interface MlGetCategoriesRequest extends RequestBase { partition_field_value?: string /** Specifies the maximum number of categories to obtain. */ size?: integer - /** Configures pagination. This parameter has the `from` and `size` properties. */ + /** Configures pagination. + * This parameter has the `from` and `size` properties. */ page?: MlPage /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { job_id?: never, category_id?: never, from?: never, partition_field_value?: never, size?: never, page?: never } @@ -18191,15 +26913,29 @@ export interface MlGetCategoriesResponse { } export interface MlGetDataFrameAnalyticsRequest extends RequestBase { -/** Identifier for the data frame analytics job. If you do not specify this option, the API returns information for the first hundred data frame analytics jobs. */ + /** Identifier for the data frame analytics job. 
If you do not specify this + * option, the API returns information for the first hundred data frame + * analytics jobs. */ id?: Id - /** Specifies what to do when the request: 1. Contains wildcard expressions and there are no data frame analytics jobs that match. 2. Contains the `_all` string or no identifiers and there are no matches. 3. Contains wildcard expressions and there are only partial matches. The default value returns an empty data_frame_analytics array when there are no matches and the subset of results when there are partial matches. If this parameter is `false`, the request returns a 404 status code when there are no matches or only partial matches. */ + /** Specifies what to do when the request: + * + * 1. Contains wildcard expressions and there are no data frame analytics + * jobs that match. + * 2. Contains the `_all` string or no identifiers and there are no matches. + * 3. Contains wildcard expressions and there are only partial matches. + * + * The default value returns an empty data_frame_analytics array when there + * are no matches and the subset of results when there are partial matches. + * If this parameter is `false`, the request returns a 404 status code when + * there are no matches or only partial matches. */ allow_no_match?: boolean /** Skips the specified number of data frame analytics jobs. */ from?: integer /** Specifies the maximum number of data frame analytics jobs to obtain. */ size?: integer - /** Indicates if certain fields should be removed from the configuration on retrieval. This allows the configuration to be in an acceptable format to be retrieved and then added to another cluster. */ + /** Indicates if certain fields should be removed from the configuration on + * retrieval. This allows the configuration to be in an acceptable format to + * be retrieved and then added to another cluster. */ exclude_generated?: boolean /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { id?: never, allow_no_match?: never, from?: never, size?: never, exclude_generated?: never } @@ -18209,13 +26945,26 @@ export interface MlGetDataFrameAnalyticsRequest extends RequestBase { export interface MlGetDataFrameAnalyticsResponse { count: integer + /** An array of data frame analytics job resources, which are sorted by the id value in ascending order. */ data_frame_analytics: MlDataframeAnalyticsSummary[] } export interface MlGetDataFrameAnalyticsStatsRequest extends RequestBase { -/** Identifier for the data frame analytics job. If you do not specify this option, the API returns information for the first hundred data frame analytics jobs. */ + /** Identifier for the data frame analytics job. If you do not specify this + * option, the API returns information for the first hundred data frame + * analytics jobs. */ id?: Id - /** Specifies what to do when the request: 1. Contains wildcard expressions and there are no data frame analytics jobs that match. 2. Contains the `_all` string or no identifiers and there are no matches. 3. Contains wildcard expressions and there are only partial matches. The default value returns an empty data_frame_analytics array when there are no matches and the subset of results when there are partial matches. If this parameter is `false`, the request returns a 404 status code when there are no matches or only partial matches. */ + /** Specifies what to do when the request: + * + * 1. Contains wildcard expressions and there are no data frame analytics + * jobs that match. + * 2. 
Contains the `_all` string or no identifiers and there are no matches. + * 3. Contains wildcard expressions and there are only partial matches. + * + * The default value returns an empty data_frame_analytics array when there + * are no matches and the subset of results when there are partial matches. + * If this parameter is `false`, the request returns a 404 status code when + * there are no matches or only partial matches. */ allow_no_match?: boolean /** Skips the specified number of data frame analytics jobs. */ from?: integer @@ -18231,13 +26980,25 @@ export interface MlGetDataFrameAnalyticsStatsRequest extends RequestBase { export interface MlGetDataFrameAnalyticsStatsResponse { count: long + /** An array of objects that contain usage information for data frame analytics jobs, which are sorted by the id value in ascending order. */ data_frame_analytics: MlDataframeAnalytics[] } export interface MlGetDatafeedStatsRequest extends RequestBase { -/** Identifier for the datafeed. It can be a datafeed identifier or a wildcard expression. If you do not specify one of these options, the API returns information about all datafeeds. */ + /** Identifier for the datafeed. It can be a datafeed identifier or a + * wildcard expression. If you do not specify one of these options, the API + * returns information about all datafeeds. */ datafeed_id?: Ids - /** Specifies what to do when the request: 1. Contains wildcard expressions and there are no datafeeds that match. 2. Contains the `_all` string or no identifiers and there are no matches. 3. Contains wildcard expressions and there are only partial matches. The default value is `true`, which returns an empty `datafeeds` array when there are no matches and the subset of results when there are partial matches. If this parameter is `false`, the request returns a `404` status code when there are no matches or only partial matches. */ + /** Specifies what to do when the request: + * + * 1. Contains wildcard expressions and there are no datafeeds that match. + * 2. Contains the `_all` string or no identifiers and there are no matches. + * 3. Contains wildcard expressions and there are only partial matches. + * + * The default value is `true`, which returns an empty `datafeeds` array + * when there are no matches and the subset of results when there are + * partial matches. If this parameter is `false`, the request returns a + * `404` status code when there are no matches or only partial matches. */ allow_no_match?: boolean /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { datafeed_id?: never, allow_no_match?: never } @@ -18251,11 +27012,24 @@ export interface MlGetDatafeedStatsResponse { } export interface MlGetDatafeedsRequest extends RequestBase { -/** Identifier for the datafeed. It can be a datafeed identifier or a wildcard expression. If you do not specify one of these options, the API returns information about all datafeeds. */ + /** Identifier for the datafeed. It can be a datafeed identifier or a + * wildcard expression. If you do not specify one of these options, the API + * returns information about all datafeeds. */ datafeed_id?: Ids - /** Specifies what to do when the request: 1. Contains wildcard expressions and there are no datafeeds that match. 2. Contains the `_all` string or no identifiers and there are no matches. 3. Contains wildcard expressions and there are only partial matches. 
The default value is `true`, which returns an empty `datafeeds` array when there are no matches and the subset of results when there are partial matches. If this parameter is `false`, the request returns a `404` status code when there are no matches or only partial matches. */ + /** Specifies what to do when the request: + * + * 1. Contains wildcard expressions and there are no datafeeds that match. + * 2. Contains the `_all` string or no identifiers and there are no matches. + * 3. Contains wildcard expressions and there are only partial matches. + * + * The default value is `true`, which returns an empty `datafeeds` array + * when there are no matches and the subset of results when there are + * partial matches. If this parameter is `false`, the request returns a + * `404` status code when there are no matches or only partial matches. */ allow_no_match?: boolean - /** Indicates if certain fields should be removed from the configuration on retrieval. This allows the configuration to be in an acceptable format to be retrieved and then added to another cluster. */ + /** Indicates if certain fields should be removed from the configuration on + * retrieval. This allows the configuration to be in an acceptable format to + * be retrieved and then added to another cluster. */ exclude_generated?: boolean /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { datafeed_id?: never, allow_no_match?: never, exclude_generated?: never } @@ -18269,7 +27043,7 @@ export interface MlGetDatafeedsResponse { } export interface MlGetFiltersRequest extends RequestBase { -/** A string that uniquely identifies a filter. */ + /** A string that uniquely identifies a filter. */ filter_id?: Ids /** Skips the specified number of filters. */ from?: integer @@ -18287,25 +27061,32 @@ export interface MlGetFiltersResponse { } export interface MlGetInfluencersRequest extends RequestBase { -/** Identifier for the anomaly detection job. */ + /** Identifier for the anomaly detection job. */ job_id: Id /** If true, the results are sorted in descending order. */ desc?: boolean - /** Returns influencers with timestamps earlier than this time. The default value means it is unset and results are not limited to specific timestamps. */ + /** Returns influencers with timestamps earlier than this time. + * The default value means it is unset and results are not limited to + * specific timestamps. */ end?: DateTime - /** If true, the output excludes interim results. By default, interim results are included. */ + /** If true, the output excludes interim results. By default, interim results + * are included. */ exclude_interim?: boolean - /** Returns influencers with anomaly scores greater than or equal to this value. */ + /** Returns influencers with anomaly scores greater than or equal to this + * value. */ influencer_score?: double /** Skips the specified number of influencers. */ from?: integer /** Specifies the maximum number of influencers to obtain. */ size?: integer - /** Specifies the sort field for the requested influencers. By default, the influencers are sorted by the `influencer_score` value. */ + /** Specifies the sort field for the requested influencers. By default, the + * influencers are sorted by the `influencer_score` value. */ sort?: Field - /** Returns influencers with timestamps after this time. The default value means it is unset and results are not limited to specific timestamps. */ + /** Returns influencers with timestamps after this time. 
The default value + * means it is unset and results are not limited to specific timestamps. */ start?: DateTime - /** Configures pagination. This parameter has the `from` and `size` properties. */ + /** Configures pagination. + * This parameter has the `from` and `size` properties. */ page?: MlPage /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { job_id?: never, desc?: never, end?: never, exclude_interim?: never, influencer_score?: never, from?: never, size?: never, sort?: never, start?: never, page?: never } @@ -18315,13 +27096,26 @@ export interface MlGetInfluencersRequest extends RequestBase { export interface MlGetInfluencersResponse { count: long + /** Array of influencer objects */ influencers: MlInfluencer[] } export interface MlGetJobStatsRequest extends RequestBase { -/** Identifier for the anomaly detection job. It can be a job identifier, a group name, a comma-separated list of jobs, or a wildcard expression. If you do not specify one of these options, the API returns information for all anomaly detection jobs. */ + /** Identifier for the anomaly detection job. It can be a job identifier, a + * group name, a comma-separated list of jobs, or a wildcard expression. If + * you do not specify one of these options, the API returns information for + * all anomaly detection jobs. */ job_id?: Id - /** Specifies what to do when the request: 1. Contains wildcard expressions and there are no jobs that match. 2. Contains the _all string or no identifiers and there are no matches. 3. Contains wildcard expressions and there are only partial matches. If `true`, the API returns an empty `jobs` array when there are no matches and the subset of results when there are partial matches. If `false`, the API returns a `404` status code when there are no matches or only partial matches. */ + /** Specifies what to do when the request: + * + * 1. Contains wildcard expressions and there are no jobs that match. + * 2. Contains the _all string or no identifiers and there are no matches. + * 3. Contains wildcard expressions and there are only partial matches. + * + * If `true`, the API returns an empty `jobs` array when + * there are no matches and the subset of results when there are partial + * matches. If `false`, the API returns a `404` status + * code when there are no matches or only partial matches. */ allow_no_match?: boolean /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { job_id?: never, allow_no_match?: never } @@ -18335,11 +27129,24 @@ export interface MlGetJobStatsResponse { } export interface MlGetJobsRequest extends RequestBase { -/** Identifier for the anomaly detection job. It can be a job identifier, a group name, or a wildcard expression. If you do not specify one of these options, the API returns information for all anomaly detection jobs. */ + /** Identifier for the anomaly detection job. It can be a job identifier, a + * group name, or a wildcard expression. If you do not specify one of these + * options, the API returns information for all anomaly detection jobs. */ job_id?: Ids - /** Specifies what to do when the request: 1. Contains wildcard expressions and there are no jobs that match. 2. Contains the _all string or no identifiers and there are no matches. 3. Contains wildcard expressions and there are only partial matches. The default value is `true`, which returns an empty `jobs` array when there are no matches and the subset of results when there are partial matches. 
If this parameter is `false`, the request returns a `404` status code when there are no matches or only partial matches. */ + /** Specifies what to do when the request: + * + * 1. Contains wildcard expressions and there are no jobs that match. + * 2. Contains the _all string or no identifiers and there are no matches. + * 3. Contains wildcard expressions and there are only partial matches. + * + * The default value is `true`, which returns an empty `jobs` array when + * there are no matches and the subset of results when there are partial + * matches. If this parameter is `false`, the request returns a `404` status + * code when there are no matches or only partial matches. */ allow_no_match?: boolean - /** Indicates if certain fields should be removed from the configuration on retrieval. This allows the configuration to be in an acceptable format to be retrieved and then added to another cluster. */ + /** Indicates if certain fields should be removed from the configuration on + * retrieval. This allows the configuration to be in an acceptable format to + * be retrieved and then added to another cluster. */ exclude_generated?: boolean /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { job_id?: never, allow_no_match?: never, exclude_generated?: never } @@ -18353,51 +27160,82 @@ export interface MlGetJobsResponse { } export interface MlGetMemoryStatsJvmStats { + /** Maximum amount of memory available for use by the heap. */ heap_max?: ByteSize + /** Maximum amount of memory, in bytes, available for use by the heap. */ heap_max_in_bytes: integer + /** Amount of Java heap currently being used for caching inference models. */ java_inference?: ByteSize + /** Amount of Java heap, in bytes, currently being used for caching inference models. */ java_inference_in_bytes: integer + /** Maximum amount of Java heap to be used for caching inference models. */ java_inference_max?: ByteSize + /** Maximum amount of Java heap, in bytes, to be used for caching inference models. */ java_inference_max_in_bytes: integer } export interface MlGetMemoryStatsMemMlStats { + /** Amount of native memory set aside for anomaly detection jobs. */ anomaly_detectors?: ByteSize + /** Amount of native memory, in bytes, set aside for anomaly detection jobs. */ anomaly_detectors_in_bytes: integer + /** Amount of native memory set aside for data frame analytics jobs. */ data_frame_analytics?: ByteSize + /** Amount of native memory, in bytes, set aside for data frame analytics jobs. */ data_frame_analytics_in_bytes: integer + /** Maximum amount of native memory (separate to the JVM heap) that may be used by machine learning native processes. */ max?: ByteSize + /** Maximum amount of native memory (separate to the JVM heap), in bytes, that may be used by machine learning native processes. */ max_in_bytes: integer + /** Amount of native memory set aside for loading machine learning native code shared libraries. */ native_code_overhead?: ByteSize + /** Amount of native memory, in bytes, set aside for loading machine learning native code shared libraries. */ native_code_overhead_in_bytes: integer + /** Amount of native memory set aside for trained models that have a PyTorch model_type. */ native_inference?: ByteSize + /** Amount of native memory, in bytes, set aside for trained models that have a PyTorch model_type. 
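// Illustrative usage sketch (not part of the generated diff): how the `allow_no_match` and
// `exclude_generated` parameters documented above are passed to the client. Assumes a configured
// `Client` from '@elastic/elasticsearch'; the job ID pattern 'my-job*' is hypothetical.
import { Client } from '@elastic/elasticsearch'

async function listAnomalyJobs (client: Client) {
  const res = await client.ml.getJobs({
    job_id: 'my-job*',        // job ID, group name, or wildcard; omit to get all jobs
    allow_no_match: true,     // empty `jobs` array instead of a 404 when nothing matches
    exclude_generated: false  // keep generated fields in the returned configuration
  })
  console.log(res.count, res.jobs.map(j => j.job_id))
}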
*/ native_inference_in_bytes: integer } export interface MlGetMemoryStatsMemStats { + /** If the amount of physical memory has been overridden using the es.total_memory_bytes system property + * then this reports the overridden value. Otherwise it reports the same value as total. */ adjusted_total?: ByteSize + /** If the amount of physical memory has been overridden using the `es.total_memory_bytes` system property + * then this reports the overridden value in bytes. Otherwise it reports the same value as `total_in_bytes`. */ adjusted_total_in_bytes: integer + /** Total amount of physical memory. */ total?: ByteSize + /** Total amount of physical memory in bytes. */ total_in_bytes: integer + /** Contains statistics about machine learning use of native memory on the node. */ ml: MlGetMemoryStatsMemMlStats } export interface MlGetMemoryStatsMemory { attributes: Record + /** Contains Java Virtual Machine (JVM) statistics for the node. */ jvm: MlGetMemoryStatsJvmStats + /** Contains statistics about memory usage for the node. */ mem: MlGetMemoryStatsMemStats + /** Human-readable identifier for the node. Based on the Node name setting. */ name: Name + /** Roles assigned to the node. */ roles: string[] + /** The host and port where transport HTTP connections are accepted. */ transport_address: TransportAddress ephemeral_id: Id } export interface MlGetMemoryStatsRequest extends RequestBase { -/** The names of particular nodes in the cluster to target. For example, `nodeId1,nodeId2` or `ml:true` */ + /** The names of particular nodes in the cluster to target. For example, `nodeId1,nodeId2` or + * `ml:true` */ node_id?: Id - /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ + /** Period to wait for a connection to the master node. If no response is received before the timeout + * expires, the request fails and returns an error. */ master_timeout?: Duration - /** Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ + /** Period to wait for a response. If no response is received before the timeout expires, the request + * fails and returns an error. */ timeout?: Duration /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { node_id?: never, master_timeout?: never, timeout?: never } @@ -18412,11 +27250,21 @@ export interface MlGetMemoryStatsResponse { } export interface MlGetModelSnapshotUpgradeStatsRequest extends RequestBase { -/** Identifier for the anomaly detection job. */ + /** Identifier for the anomaly detection job. */ job_id: Id - /** A numerical character string that uniquely identifies the model snapshot. You can get information for multiple snapshots by using a comma-separated list or a wildcard expression. You can get all snapshots by using `_all`, by specifying `*` as the snapshot ID, or by omitting the snapshot ID. */ + /** A numerical character string that uniquely identifies the model snapshot. You can get information for multiple + * snapshots by using a comma-separated list or a wildcard expression. You can get all snapshots by using `_all`, + * by specifying `*` as the snapshot ID, or by omitting the snapshot ID. */ snapshot_id: Id - /** Specifies what to do when the request: - Contains wildcard expressions and there are no jobs that match. - Contains the _all string or no identifiers and there are no matches.
- Contains wildcard expressions and there are only partial matches. The default value is true, which returns an empty jobs array when there are no matches and the subset of results when there are partial matches. If this parameter is false, the request returns a 404 status code when there are no matches or only partial matches. */ + /** Specifies what to do when the request: + * + * - Contains wildcard expressions and there are no jobs that match. + * - Contains the _all string or no identifiers and there are no matches. + * - Contains wildcard expressions and there are only partial matches. + * + * The default value is true, which returns an empty jobs array when there are no matches and the subset of results + * when there are partial matches. If this parameter is false, the request returns a 404 status code when there are + * no matches or only partial matches. */ allow_no_match?: boolean /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { job_id?: never, snapshot_id?: never, allow_no_match?: never } @@ -18430,9 +27278,11 @@ export interface MlGetModelSnapshotUpgradeStatsResponse { } export interface MlGetModelSnapshotsRequest extends RequestBase { -/** Identifier for the anomaly detection job. */ + /** Identifier for the anomaly detection job. */ job_id: Id - /** A numerical character string that uniquely identifies the model snapshot. You can get information for multiple snapshots by using a comma-separated list or a wildcard expression. You can get all snapshots by using `_all`, by specifying `*` as the snapshot ID, or by omitting the snapshot ID. */ + /** A numerical character string that uniquely identifies the model snapshot. You can get information for multiple + * snapshots by using a comma-separated list or a wildcard expression. You can get all snapshots by using `_all`, + * by specifying `*` as the snapshot ID, or by omitting the snapshot ID. */ snapshot_id?: Id /** Skips the specified number of snapshots. */ from?: integer @@ -18459,7 +27309,12 @@ export interface MlGetModelSnapshotsResponse { } export interface MlGetOverallBucketsRequest extends RequestBase { -/** Identifier for the anomaly detection job. It can be a job identifier, a group name, a comma-separated list of jobs or groups, or a wildcard expression. You can summarize the bucket results for all anomaly detection jobs by using `_all` or by specifying `*` as the ``. */ + /** Identifier for the anomaly detection job. It can be a job identifier, a + * group name, a comma-separated list of jobs or groups, or a wildcard + * expression. + * + * You can summarize the bucket results for all anomaly detection jobs by + * using `_all` or by specifying `*` as the ``. */ job_id: Id /** Refer to the description for the `allow_no_match` query parameter. */ allow_no_match?: boolean @@ -18483,11 +27338,12 @@ export interface MlGetOverallBucketsRequest extends RequestBase { export interface MlGetOverallBucketsResponse { count: long + /** Array of overall bucket objects */ overall_buckets: MlOverallBucket[] } export interface MlGetRecordsRequest extends RequestBase { -/** Identifier for the anomaly detection job. */ + /** Identifier for the anomaly detection job. */ job_id: Id /** Skips the specified number of records. */ from?: integer @@ -18518,39 +27374,63 @@ export interface MlGetRecordsResponse { } export interface MlGetTrainedModelsRequest extends RequestBase { -/** The unique identifier of the trained model or a model alias. 
You can get information for multiple trained models in a single API request by using a comma-separated list of model IDs or a wildcard expression. */ + /** The unique identifier of the trained model or a model alias. + * + * You can get information for multiple trained models in a single API + * request by using a comma-separated list of model IDs or a wildcard + * expression. */ model_id?: Ids - /** Specifies what to do when the request: - Contains wildcard expressions and there are no models that match. - Contains the _all string or no identifiers and there are no matches. - Contains wildcard expressions and there are only partial matches. If true, it returns an empty array when there are no matches and the subset of results when there are partial matches. */ + /** Specifies what to do when the request: + * + * - Contains wildcard expressions and there are no models that match. + * - Contains the _all string or no identifiers and there are no matches. + * - Contains wildcard expressions and there are only partial matches. + * + * If true, it returns an empty array when there are no matches and the + * subset of results when there are partial matches. */ allow_no_match?: boolean - /** Specifies whether the included model definition should be returned as a JSON map (true) or in a custom compressed format (false). */ + /** Specifies whether the included model definition should be returned as a + * JSON map (true) or in a custom compressed format (false). */ decompress_definition?: boolean - /** Indicates if certain fields should be removed from the configuration on retrieval. This allows the configuration to be in an acceptable format to be retrieved and then added to another cluster. */ + /** Indicates if certain fields should be removed from the configuration on + * retrieval. This allows the configuration to be in an acceptable format to + * be retrieved and then added to another cluster. */ exclude_generated?: boolean /** Skips the specified number of models. */ from?: integer - /** A comma delimited string of optional fields to include in the response body. */ + /** A comma delimited string of optional fields to include in the response + * body. */ include?: MlInclude - /** parameter is deprecated! Use [include=definition] instead */ - include_model_definition?: boolean /** Specifies the maximum number of models to obtain. */ size?: integer - /** A comma delimited string of tags. A trained model can have many tags, or none. When supplied, only trained models that contain all the supplied tags are returned. */ + /** A comma delimited string of tags. A trained model can have many tags, or + * none. When supplied, only trained models that contain all the supplied + * tags are returned. */ tags?: string | string[] /** All values in `body` will be added to the request body. */ - body?: string | { [key: string]: any } & { model_id?: never, allow_no_match?: never, decompress_definition?: never, exclude_generated?: never, from?: never, include?: never, include_model_definition?: never, size?: never, tags?: never } + body?: string | { [key: string]: any } & { model_id?: never, allow_no_match?: never, decompress_definition?: never, exclude_generated?: never, from?: never, include?: never, size?: never, tags?: never } /** All values in `querystring` will be added to the request querystring. 
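// Illustrative sketch (not part of the generated diff) of the trained-model listing request
// described above. Note that this change removes the deprecated `include_model_definition`
// flag, so `include: 'definition'` is used instead; the model ID pattern is hypothetical.
import { Client } from '@elastic/elasticsearch'

async function listTrainedModels (client: Client) {
  const res = await client.ml.getTrainedModels({
    model_id: 'my-model-*',       // comma-separated list or wildcard; omit for all models
    include: 'definition',        // successor to the removed include_model_definition flag
    decompress_definition: true,  // return the definition as a JSON map rather than compressed
    allow_no_match: true,
    size: 10
  })
  console.log(res.count, res.trained_model_configs.length)
}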
*/ - querystring?: { [key: string]: any } & { model_id?: never, allow_no_match?: never, decompress_definition?: never, exclude_generated?: never, from?: never, include?: never, include_model_definition?: never, size?: never, tags?: never } + querystring?: { [key: string]: any } & { model_id?: never, allow_no_match?: never, decompress_definition?: never, exclude_generated?: never, from?: never, include?: never, size?: never, tags?: never } } export interface MlGetTrainedModelsResponse { count: integer + /** An array of trained model resources, which are sorted by the model_id value in ascending order. */ trained_model_configs: MlTrainedModelConfig[] } export interface MlGetTrainedModelsStatsRequest extends RequestBase { -/** The unique identifier of the trained model or a model alias. It can be a comma-separated list or a wildcard expression. */ + /** The unique identifier of the trained model or a model alias. It can be a + * comma-separated list or a wildcard expression. */ model_id?: Ids - /** Specifies what to do when the request: - Contains wildcard expressions and there are no models that match. - Contains the _all string or no identifiers and there are no matches. - Contains wildcard expressions and there are only partial matches. If true, it returns an empty array when there are no matches and the subset of results when there are partial matches. */ + /** Specifies what to do when the request: + * + * - Contains wildcard expressions and there are no models that match. + * - Contains the _all string or no identifiers and there are no matches. + * - Contains wildcard expressions and there are only partial matches. + * + * If true, it returns an empty array when there are no matches and the + * subset of results when there are partial matches. */ allow_no_match?: boolean /** Skips the specified number of models. */ from?: integer @@ -18563,16 +27443,20 @@ export interface MlGetTrainedModelsStatsRequest extends RequestBase { } export interface MlGetTrainedModelsStatsResponse { + /** The total number of trained model statistics that matched the requested ID patterns. Could be higher than the number of items in the trained_model_stats array as the size of the array is restricted by the supplied size parameter. */ count: integer + /** An array of trained model statistics, which are sorted by the model_id value in ascending order. */ trained_model_stats: MlTrainedModelStats[] } export interface MlInferTrainedModelRequest extends RequestBase { -/** The unique identifier of the trained model. */ + /** The unique identifier of the trained model. */ model_id: Id /** Controls the amount of time to wait for inference results. */ timeout?: Duration - /** An array of objects to pass to the model for inference. The objects should contain a fields matching your configured trained model input. Typically, for NLP models, the field name is `text_field`. Currently, for NLP models, only a single value is allowed. */ + /** An array of objects to pass to the model for inference. The objects should contain a field matching your + * configured trained model input. Typically, for NLP models, the field name is `text_field`. + * Currently, for NLP models, only a single value is allowed. */ docs: Record[] /** The inference configuration updates to apply on the API call */ inference_config?: MlInferenceConfigUpdateContainer @@ -18631,7 +27515,7 @@ export interface MlInfoResponse { } export interface MlOpenJobRequest extends RequestBase { -/** Identifier for the anomaly detection job.
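// Illustrative sketch (not part of the generated diff) of the inference request documented
// above. As noted there, NLP models typically expect a single `text_field` value per document;
// the model ID is hypothetical.
import { Client } from '@elastic/elasticsearch'

async function runInference (client: Client) {
  const res = await client.ml.inferTrainedModel({
    model_id: 'my-nlp-model',
    docs: [{ text_field: 'Elasticsearch is a distributed search and analytics engine.' }],
    timeout: '30s'
  })
  console.log(res.inference_results)
}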
*/ + /** Identifier for the anomaly detection job. */ job_id: Id /** Refer to the description for the `timeout` query parameter. */ timeout?: Duration @@ -18643,11 +27527,13 @@ export interface MlOpenJobRequest extends RequestBase { export interface MlOpenJobResponse { opened: boolean + /** The ID of the node that the job was started on. In serverless this will be the "serverless". + * If the job is allowed to open lazily and has not yet been assigned to a node, this value is an empty string. */ node: NodeId } export interface MlPostCalendarEventsRequest extends RequestBase { -/** A string that uniquely identifies a calendar. */ + /** A string that uniquely identifies a calendar. */ calendar_id: Id /** A list of one of more scheduled events. The event’s start and end times can be specified as integer milliseconds since the epoch or as a string in ISO 8601 format. */ events: MlCalendarEvent[] @@ -18662,7 +27548,7 @@ export interface MlPostCalendarEventsResponse { } export interface MlPostDataRequest extends RequestBase { -/** Identifier for the anomaly detection job. The job must have a state of open to receive and process the data. */ + /** Identifier for the anomaly detection job. The job must have a state of open to receive and process the data. */ job_id: Id /** Specifies the end of the bucket resetting range. */ reset_end?: DateTime @@ -18705,9 +27591,11 @@ export interface MlPreviewDataFrameAnalyticsDataframePreviewConfig { } export interface MlPreviewDataFrameAnalyticsRequest extends RequestBase { -/** Identifier for the data frame analytics job. */ + /** Identifier for the data frame analytics job. */ id?: Id - /** A data frame analytics config as described in create data frame analytics jobs. Note that `id` and `dest` don’t need to be provided in the context of this API. */ + /** A data frame analytics config as described in create data frame analytics + * jobs. Note that `id` and `dest` don’t need to be provided in the context of + * this API. */ config?: MlPreviewDataFrameAnalyticsDataframePreviewConfig /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { id?: never, config?: never } @@ -18716,11 +27604,15 @@ export interface MlPreviewDataFrameAnalyticsRequest extends RequestBase { } export interface MlPreviewDataFrameAnalyticsResponse { + /** An array of objects that contain feature name and value pairs. The features have been processed and indicate what will be sent to the model for training. */ feature_values: Record[] } export interface MlPreviewDatafeedRequest extends RequestBase { -/** A numerical character string that uniquely identifies the datafeed. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. It must start and end with alphanumeric characters. NOTE: If you use this path parameter, you cannot provide datafeed or anomaly detection job configuration details in the request body. */ + /** A numerical character string that uniquely identifies the datafeed. This identifier can contain lowercase + * alphanumeric characters (a-z and 0-9), hyphens, and underscores. It must start and end with alphanumeric + * characters. NOTE: If you use this path parameter, you cannot provide datafeed or anomaly detection job + * configuration details in the request body. 
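// Illustrative sketch (not part of the generated diff) combining two of the requests documented
// above: opening an anomaly detection job and posting scheduled events to a calendar. Start and
// end times may be epoch milliseconds or ISO 8601 strings; all identifiers are hypothetical.
import { Client } from '@elastic/elasticsearch'

async function openJobAndAddMaintenanceWindow (client: Client) {
  const opened = await client.ml.openJob({ job_id: 'my-job', timeout: '30m' })
  console.log(opened.opened, opened.node)

  await client.ml.postCalendarEvents({
    calendar_id: 'planned-outages',
    events: [{
      description: 'Scheduled maintenance window',
      start_time: '2025-01-01T00:00:00Z',
      end_time: '2025-01-01T02:00:00Z'
    }]
  })
}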
*/ datafeed_id?: Id /** The start time from where the datafeed preview should begin */ start?: DateTime @@ -18728,7 +27620,10 @@ export interface MlPreviewDatafeedRequest extends RequestBase { end?: DateTime /** The datafeed definition to preview. */ datafeed_config?: MlDatafeedConfig - /** The configuration details for the anomaly detection job that is associated with the datafeed. If the `datafeed_config` object does not include a `job_id` that references an existing anomaly detection job, you must supply this `job_config` object. If you include both a `job_id` and a `job_config`, the latter information is used. You cannot specify a `job_config` object unless you also supply a `datafeed_config` object. */ + /** The configuration details for the anomaly detection job that is associated with the datafeed. If the + * `datafeed_config` object does not include a `job_id` that references an existing anomaly detection job, you must + * supply this `job_config` object. If you include both a `job_id` and a `job_config`, the latter information is + * used. You cannot specify a `job_config` object unless you also supply a `datafeed_config` object. */ job_config?: MlJobConfig /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { datafeed_id?: never, start?: never, end?: never, datafeed_config?: never, job_config?: never } @@ -18739,7 +27634,7 @@ export interface MlPreviewDatafeedRequest extends RequestBase { export type MlPreviewDatafeedResponse = TDocument[] export interface MlPutCalendarRequest extends RequestBase { -/** A string that uniquely identifies a calendar. */ + /** A string that uniquely identifies a calendar. */ calendar_id: Id /** An array of anomaly detection job identifiers. */ job_ids?: Id[] @@ -18752,13 +27647,16 @@ export interface MlPutCalendarRequest extends RequestBase { } export interface MlPutCalendarResponse { + /** A string that uniquely identifies a calendar. */ calendar_id: Id + /** A description of the calendar. */ description?: string + /** A list of anomaly detection job identifiers or group names. */ job_ids: Ids } export interface MlPutCalendarJobRequest extends RequestBase { -/** A string that uniquely identifies a calendar. */ + /** A string that uniquely identifies a calendar. */ calendar_id: Id /** An identifier for the anomaly detection jobs. It can be a job identifier, a group name, or a comma-separated list of jobs or groups. */ job_id: Ids @@ -18769,28 +27667,76 @@ export interface MlPutCalendarJobRequest extends RequestBase { } export interface MlPutCalendarJobResponse { + /** A string that uniquely identifies a calendar. */ calendar_id: Id + /** A description of the calendar. */ description?: string + /** A list of anomaly detection job identifiers or group names. */ job_ids: Ids } export interface MlPutDataFrameAnalyticsRequest extends RequestBase { -/** Identifier for the data frame analytics job. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. It must start and end with alphanumeric characters. */ + /** Identifier for the data frame analytics job. This identifier can contain + * lowercase alphanumeric characters (a-z and 0-9), hyphens, and + * underscores. It must start and end with alphanumeric characters. */ id: Id - /** Specifies whether this job can start when there is insufficient machine learning node capacity for it to be immediately assigned to a node. 
If set to `false` and a machine learning node with capacity to run the job cannot be immediately found, the API returns an error. If set to `true`, the API does not return an error; the job waits in the `starting` state until sufficient machine learning node capacity is available. This behavior is also affected by the cluster-wide `xpack.ml.max_lazy_ml_nodes` setting. */ + /** Specifies whether this job can start when there is insufficient machine + * learning node capacity for it to be immediately assigned to a node. If + * set to `false` and a machine learning node with capacity to run the job + * cannot be immediately found, the API returns an error. If set to `true`, + * the API does not return an error; the job waits in the `starting` state + * until sufficient machine learning node capacity is available. This + * behavior is also affected by the cluster-wide + * `xpack.ml.max_lazy_ml_nodes` setting. */ allow_lazy_start?: boolean - /** The analysis configuration, which contains the information necessary to perform one of the following types of analysis: classification, outlier detection, or regression. */ + /** The analysis configuration, which contains the information necessary to + * perform one of the following types of analysis: classification, outlier + * detection, or regression. */ analysis: MlDataframeAnalysisContainer - /** Specifies `includes` and/or `excludes` patterns to select which fields will be included in the analysis. The patterns specified in `excludes` are applied last, therefore `excludes` takes precedence. In other words, if the same field is specified in both `includes` and `excludes`, then the field will not be included in the analysis. If `analyzed_fields` is not set, only the relevant fields will be included. For example, all the numeric fields for outlier detection. The supported fields vary for each type of analysis. Outlier detection requires numeric or `boolean` data to analyze. The algorithms don’t support missing values therefore fields that have data types other than numeric or boolean are ignored. Documents where included fields contain missing values, null values, or an array are also ignored. Therefore the `dest` index may contain documents that don’t have an outlier score. Regression supports fields that are numeric, `boolean`, `text`, `keyword`, and `ip` data types. It is also tolerant of missing values. Fields that are supported are included in the analysis, other fields are ignored. Documents where included fields contain an array with two or more values are also ignored. Documents in the `dest` index that don’t contain a results field are not included in the regression analysis. Classification supports fields that are numeric, `boolean`, `text`, `keyword`, and `ip` data types. It is also tolerant of missing values. Fields that are supported are included in the analysis, other fields are ignored. Documents where included fields contain an array with two or more values are also ignored. Documents in the `dest` index that don’t contain a results field are not included in the classification analysis. Classification analysis can be improved by mapping ordinal variable values to a single number. For example, in case of age ranges, you can model the values as `0-14 = 0`, `15-24 = 1`, `25-34 = 2`, and so on. */ + /** Specifies `includes` and/or `excludes` patterns to select which fields + * will be included in the analysis. The patterns specified in `excludes` + * are applied last, therefore `excludes` takes precedence. 
In other words, + * if the same field is specified in both `includes` and `excludes`, then + * the field will not be included in the analysis. If `analyzed_fields` is + * not set, only the relevant fields will be included. For example, all the + * numeric fields for outlier detection. + * The supported fields vary for each type of analysis. Outlier detection + * requires numeric or `boolean` data to analyze. The algorithms don’t + * support missing values therefore fields that have data types other than + * numeric or boolean are ignored. Documents where included fields contain + * missing values, null values, or an array are also ignored. Therefore the + * `dest` index may contain documents that don’t have an outlier score. + * Regression supports fields that are numeric, `boolean`, `text`, + * `keyword`, and `ip` data types. It is also tolerant of missing values. + * Fields that are supported are included in the analysis, other fields are + * ignored. Documents where included fields contain an array with two or + * more values are also ignored. Documents in the `dest` index that don’t + * contain a results field are not included in the regression analysis. + * Classification supports fields that are numeric, `boolean`, `text`, + * `keyword`, and `ip` data types. It is also tolerant of missing values. + * Fields that are supported are included in the analysis, other fields are + * ignored. Documents where included fields contain an array with two or + * more values are also ignored. Documents in the `dest` index that don’t + * contain a results field are not included in the classification analysis. + * Classification analysis can be improved by mapping ordinal variable + * values to a single number. For example, in case of age ranges, you can + * model the values as `0-14 = 0`, `15-24 = 1`, `25-34 = 2`, and so on. */ analyzed_fields?: MlDataframeAnalysisAnalyzedFields | string[] /** A description of the job. */ description?: string /** The destination configuration. */ dest: MlDataframeAnalyticsDestination - /** The maximum number of threads to be used by the analysis. Using more threads may decrease the time necessary to complete the analysis at the cost of using more CPU. Note that the process may use additional threads for operational functionality other than the analysis itself. */ + /** The maximum number of threads to be used by the analysis. Using more + * threads may decrease the time necessary to complete the analysis at the + * cost of using more CPU. Note that the process may use additional threads + * for operational functionality other than the analysis itself. */ max_num_threads?: integer _meta?: Metadata - /** The approximate maximum amount of memory resources that are permitted for analytical processing. If your `elasticsearch.yml` file contains an `xpack.ml.max_model_memory_limit` setting, an error occurs when you try to create data frame analytics jobs that have `model_memory_limit` values greater than that setting. */ + /** The approximate maximum amount of memory resources that are permitted for + * analytical processing. If your `elasticsearch.yml` file contains an + * `xpack.ml.max_model_memory_limit` setting, an error occurs when you try + * to create data frame analytics jobs that have `model_memory_limit` values + * greater than that setting. */ model_memory_limit?: string /** The configuration of how to source the analysis data. 
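// Illustrative configuration sketch (not part of the generated diff) for the data frame analytics
// job fields described above: `analysis`, `analyzed_fields`, `dest`, `source`, and
// `model_memory_limit`. Index names and the job ID are hypothetical.
import { Client } from '@elastic/elasticsearch'

async function createRegressionJob (client: Client) {
  await client.ml.putDataFrameAnalytics({
    id: 'house-prices-regression',
    source: { index: 'house-prices' },
    dest: { index: 'house-prices-predictions' },
    analysis: {
      regression: { dependent_variable: 'price', training_percent: 80 }
    },
    analyzed_fields: { excludes: ['address'] },  // `excludes` takes precedence, as noted above
    model_memory_limit: '1gb',
    max_num_threads: 1
  })
}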
*/ source: MlDataframeAnalyticsSource @@ -18819,47 +27765,76 @@ export interface MlPutDataFrameAnalyticsResponse { } export interface MlPutDatafeedRequest extends RequestBase { -/** A numerical character string that uniquely identifies the datafeed. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. It must start and end with alphanumeric characters. */ + /** A numerical character string that uniquely identifies the datafeed. + * This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. + * It must start and end with alphanumeric characters. */ datafeed_id: Id - /** If true, wildcard indices expressions that resolve into no concrete indices are ignored. This includes the `_all` string or when no indices are specified. */ + /** If true, wildcard indices expressions that resolve into no concrete indices are ignored. This includes the `_all` + * string or when no indices are specified. */ allow_no_indices?: boolean - /** Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values. */ + /** Type of index that wildcard patterns can match. If the request can target data streams, this argument determines + * whether wildcard expressions match hidden data streams. Supports comma-separated values. */ expand_wildcards?: ExpandWildcards /** If true, concrete, expanded, or aliased indices are ignored when frozen. */ ignore_throttled?: boolean /** If true, unavailable indices (missing or closed) are ignored. */ ignore_unavailable?: boolean - /** If set, the datafeed performs aggregation searches. Support for aggregations is limited and should be used only with low cardinality data. */ + /** If set, the datafeed performs aggregation searches. + * Support for aggregations is limited and should be used only with low cardinality data. */ aggregations?: Record - /** @alias aggregations */ - /** If set, the datafeed performs aggregation searches. Support for aggregations is limited and should be used only with low cardinality data. */ + /** If set, the datafeed performs aggregation searches. + * Support for aggregations is limited and should be used only with low cardinality data. + * @alias aggregations */ aggs?: Record - /** Datafeeds might be required to search over long time periods, for several months or years. This search is split into time chunks in order to ensure the load on Elasticsearch is managed. Chunking configuration controls how the size of these time chunks are calculated; it is an advanced configuration option. */ + /** Datafeeds might be required to search over long time periods, for several months or years. + * This search is split into time chunks in order to ensure the load on Elasticsearch is managed. + * Chunking configuration controls how the size of these time chunks are calculated; + * it is an advanced configuration option. */ chunking_config?: MlChunkingConfig - /** Specifies whether the datafeed checks for missing data and the size of the window. The datafeed can optionally search over indices that have already been read in an effort to determine whether any data has subsequently been added to the index. If missing data is found, it is a good indication that the `query_delay` is set too low and the data is being indexed after the datafeed has passed that moment in time. This check runs only on real-time datafeeds. 
*/ + /** Specifies whether the datafeed checks for missing data and the size of the window. + * The datafeed can optionally search over indices that have already been read in an effort to determine whether + * any data has subsequently been added to the index. If missing data is found, it is a good indication that the + * `query_delay` is set too low and the data is being indexed after the datafeed has passed that moment in time. + * This check runs only on real-time datafeeds. */ delayed_data_check_config?: MlDelayedDataCheckConfig - /** The interval at which scheduled queries are made while the datafeed runs in real time. The default value is either the bucket span for short bucket spans, or, for longer bucket spans, a sensible fraction of the bucket span. When `frequency` is shorter than the bucket span, interim results for the last (partial) bucket are written then eventually overwritten by the full bucket results. If the datafeed uses aggregations, this value must be divisible by the interval of the date histogram aggregation. */ + /** The interval at which scheduled queries are made while the datafeed runs in real time. + * The default value is either the bucket span for short bucket spans, or, for longer bucket spans, a sensible + * fraction of the bucket span. When `frequency` is shorter than the bucket span, interim results for the last + * (partial) bucket are written then eventually overwritten by the full bucket results. If the datafeed uses + * aggregations, this value must be divisible by the interval of the date histogram aggregation. */ frequency?: Duration - /** An array of index names. Wildcards are supported. If any of the indices are in remote clusters, the machine learning nodes must have the `remote_cluster_client` role. */ + /** An array of index names. Wildcards are supported. If any of the indices are in remote clusters, the master + * nodes and the machine learning nodes must have the `remote_cluster_client` role. */ indices?: Indices - /** @alias indices */ - /** An array of index names. Wildcards are supported. If any of the indices are in remote clusters, the machine learning nodes must have the `remote_cluster_client` role. */ + /** An array of index names. Wildcards are supported. If any of the indices are in remote clusters, the master + * nodes and the machine learning nodes must have the `remote_cluster_client` role. + * @alias indices */ indexes?: Indices /** Specifies index expansion options that are used during search */ indices_options?: IndicesOptions /** Identifier for the anomaly detection job. */ job_id?: Id - /** If a real-time datafeed has never seen any data (including during any initial training period), it automatically stops and closes the associated job after this many real-time searches return no documents. In other words, it stops after `frequency` times `max_empty_searches` of real-time operation. If not set, a datafeed with no end time that sees no data remains started until it is explicitly stopped. By default, it is not set. */ + /** If a real-time datafeed has never seen any data (including during any initial training period), it automatically + * stops and closes the associated job after this many real-time searches return no documents. In other words, + * it stops after `frequency` times `max_empty_searches` of real-time operation. If not set, a datafeed with no + * end time that sees no data remains started until it is explicitly stopped. By default, it is not set. 
*/ max_empty_searches?: integer - /** The Elasticsearch query domain-specific language (DSL). This value corresponds to the query object in an Elasticsearch search POST body. All the options that are supported by Elasticsearch can be used, as this object is passed verbatim to Elasticsearch. */ + /** The Elasticsearch query domain-specific language (DSL). This value corresponds to the query object in an + * Elasticsearch search POST body. All the options that are supported by Elasticsearch can be used, as this + * object is passed verbatim to Elasticsearch. */ query?: QueryDslQueryContainer - /** The number of seconds behind real time that data is queried. For example, if data from 10:04 a.m. might not be searchable in Elasticsearch until 10:06 a.m., set this property to 120 seconds. The default value is randomly selected between `60s` and `120s`. This randomness improves the query performance when there are multiple jobs running on the same node. */ + /** The number of seconds behind real time that data is queried. For example, if data from 10:04 a.m. might + * not be searchable in Elasticsearch until 10:06 a.m., set this property to 120 seconds. The default + * value is randomly selected between `60s` and `120s`. This randomness improves the query performance + * when there are multiple jobs running on the same node. */ query_delay?: Duration /** Specifies runtime fields for the datafeed search. */ runtime_mappings?: MappingRuntimeFields - /** Specifies scripts that evaluate custom expressions and returns script fields to the datafeed. The detector configuration objects in a job can contain functions that use these script fields. */ + /** Specifies scripts that evaluate custom expressions and returns script fields to the datafeed. + * The detector configuration objects in a job can contain functions that use these script fields. */ script_fields?: Record - /** The size parameter that is used in Elasticsearch searches when the datafeed does not use aggregations. The maximum value is the value of `index.max_result_window`, which is 10,000 by default. */ + /** The size parameter that is used in Elasticsearch searches when the datafeed does not use aggregations. + * The maximum value is the value of `index.max_result_window`, which is 10,000 by default. */ scroll_size?: integer headers?: HttpHeaders /** All values in `body` will be added to the request body. */ @@ -18887,11 +27862,12 @@ export interface MlPutDatafeedResponse { } export interface MlPutFilterRequest extends RequestBase { -/** A string that uniquely identifies a filter. */ + /** A string that uniquely identifies a filter. */ filter_id: Id /** A description of the filter. */ description?: string - /** The items of the filter. A wildcard `*` can be used at the beginning or the end of an item. Up to 10000 items are allowed in each filter. */ + /** The items of the filter. A wildcard `*` can be used at the beginning or the end of an item. + * Up to 10000 items are allowed in each filter. */ items?: string[] /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { filter_id?: never, description?: never, items?: never } @@ -18906,11 +27882,19 @@ export interface MlPutFilterResponse { } export interface MlPutJobRequest extends RequestBase { -/** The identifier for the anomaly detection job. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. It must start and end with alphanumeric characters. 
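// Illustrative datafeed sketch (not part of the generated diff) wiring together the fields
// documented above: the verbatim query DSL, `frequency`, `query_delay`, and `scroll_size`.
// The datafeed ID, job ID, and index pattern are hypothetical.
import { Client } from '@elastic/elasticsearch'

async function createDatafeed (client: Client) {
  await client.ml.putDatafeed({
    datafeed_id: 'datafeed-my-job',
    job_id: 'my-job',
    indices: ['my-metrics-*'],     // remote indices also require the remote_cluster_client role
    query: { match_all: {} },      // passed verbatim to the Elasticsearch search body
    frequency: '150s',
    query_delay: '90s',
    scroll_size: 1000
  })
}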
*/ + /** The identifier for the anomaly detection job. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. It must start and end with alphanumeric characters. */ job_id: Id - /** If `true`, wildcard indices expressions that resolve into no concrete indices are ignored. This includes the `_all` string or when no indices are specified. */ + /** If `true`, wildcard indices expressions that resolve into no concrete indices are ignored. This includes the + * `_all` string or when no indices are specified. */ allow_no_indices?: boolean - /** Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values. Valid values are: * `all`: Match any data stream or index, including hidden ones. * `closed`: Match closed, non-hidden indices. Also matches any non-hidden data stream. Data streams cannot be closed. * `hidden`: Match hidden data streams and hidden indices. Must be combined with `open`, `closed`, or both. * `none`: Wildcard patterns are not accepted. * `open`: Match open, non-hidden indices. Also matches any non-hidden data stream. */ + /** Type of index that wildcard patterns can match. If the request can target data streams, this argument determines + * whether wildcard expressions match hidden data streams. Supports comma-separated values. Valid values are: + * + * * `all`: Match any data stream or index, including hidden ones. + * * `closed`: Match closed, non-hidden indices. Also matches any non-hidden data stream. Data streams cannot be closed. + * * `hidden`: Match hidden data streams and hidden indices. Must be combined with `open`, `closed`, or both. + * * `none`: Wildcard patterns are not accepted. + * * `open`: Match open, non-hidden indices. Also matches any non-hidden data stream. */ expand_wildcards?: ExpandWildcards /** If `true`, concrete, expanded or aliased indices are ignored when frozen. */ ignore_throttled?: boolean @@ -18983,7 +27967,9 @@ export interface MlPutTrainedModelAggregateOutput { } export interface MlPutTrainedModelDefinition { + /** Collection of preprocessors */ preprocessors?: MlPutTrainedModelPreprocessor[] + /** The definition of the trained model. */ trained_model: MlPutTrainedModelTrainedModel } @@ -19017,19 +28003,28 @@ export interface MlPutTrainedModelPreprocessor { } export interface MlPutTrainedModelRequest extends RequestBase { -/** The unique identifier of the trained model. */ + /** The unique identifier of the trained model. */ model_id: Id - /** If set to `true` and a `compressed_definition` is provided, the request defers definition decompression and skips relevant validations. */ + /** If set to `true` and a `compressed_definition` is provided, + * the request defers definition decompression and skips relevant + * validations. */ defer_definition_decompression?: boolean - /** Whether to wait for all child operations (e.g. model download) to complete. */ + /** Whether to wait for all child operations (e.g. model download) + * to complete. */ wait_for_completion?: boolean - /** The compressed (GZipped and Base64 encoded) inference definition of the model. If compressed_definition is specified, then definition cannot be specified. */ + /** The compressed (GZipped and Base64 encoded) inference definition of the + * model. If compressed_definition is specified, then definition cannot be + * specified. 
*/ compressed_definition?: string - /** The inference definition for the model. If definition is specified, then compressed_definition cannot be specified. */ + /** The inference definition for the model. If definition is specified, then + * compressed_definition cannot be specified. */ definition?: MlPutTrainedModelDefinition /** A human-readable description of the inference trained model. */ description?: string - /** The default configuration for inference. This can be either a regression or classification configuration. It must match the underlying definition.trained_model's target_type. For pre-packaged models such as ELSER the config is not required. */ + /** The default configuration for inference. This can be either a regression + * or classification configuration. It must match the underlying + * definition.trained_model's target_type. For pre-packaged models such as + * ELSER the config is not required. */ inference_config?: MlInferenceConfigCreateContainer /** The input field names for the model definition. */ input?: MlPutTrainedModelInput @@ -19037,9 +28032,17 @@ export interface MlPutTrainedModelRequest extends RequestBase { metadata?: any /** The model type. */ model_type?: MlTrainedModelType - /** The estimated memory usage in bytes to keep the trained model in memory. This property is supported only if defer_definition_decompression is true or the model definition is not supplied. */ + /** The estimated memory usage in bytes to keep the trained model in memory. + * This property is supported only if defer_definition_decompression is true + * or the model definition is not supplied. */ model_size_bytes?: long - /** The platform architecture (if applicable) of the trained mode. If the model only works on one platform, because it is heavily optimized for a particular processor architecture and OS combination, then this field specifies which. The format of the string must match the platform identifiers used by Elasticsearch, so one of, `linux-x86_64`, `linux-aarch64`, `darwin-x86_64`, `darwin-aarch64`, or `windows-x86_64`. For portable models (those that work independent of processor architecture or OS features), leave this field unset. */ + /** The platform architecture (if applicable) of the trained model. If the model + * only works on one platform, because it is heavily optimized for a particular + * processor architecture and OS combination, then this field specifies which. + * The format of the string must match the platform identifiers used by Elasticsearch, + * so one of, `linux-x86_64`, `linux-aarch64`, `darwin-x86_64`, `darwin-aarch64`, + * or `windows-x86_64`. For portable models (those that work independent of processor + * architecture or OS features), leave this field unset. */ platform_architecture?: string /** An array of tags to organize the model. */ tags?: string[] @@ -19061,8 +28064,14 @@ export interface MlPutTrainedModelTargetMeanEncodingPreprocessor { } export interface MlPutTrainedModelTrainedModel { + /** The definition for a binary decision tree. */ tree?: MlPutTrainedModelTrainedModelTree + /** The definition of a node in a tree. + * There are two major types of nodes: leaf nodes and not-leaf nodes. + * - Leaf nodes only need node_index and leaf_value defined. + * - All other nodes need split_feature, left_child, right_child, threshold, decision_type, and default_left defined.
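// Illustrative sketch (not part of the generated diff) of creating a trained model with the
// request fields documented above. No definition is supplied inline, so `model_size_bytes` is
// allowed here and the definition is uploaded separately in parts (see the sketch further below).
// The model ID, labels, and size are hypothetical.
import { Client } from '@elastic/elasticsearch'

async function createTrainedModel (client: Client) {
  await client.ml.putTrainedModel({
    model_id: 'my-text-classifier',
    model_type: 'pytorch',
    input: { field_names: ['text_field'] },
    inference_config: {
      text_classification: { classification_labels: ['negative', 'positive'], num_top_classes: 2 }
    },
    model_size_bytes: 123456789,   // permitted because the definition is not supplied inline
    tags: ['example']
  })
}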
*/ tree_node?: MlPutTrainedModelTrainedModelTreeNode + /** The definition for an ensemble model */ ensemble?: MlPutTrainedModelEnsemble } @@ -19090,11 +28099,13 @@ export interface MlPutTrainedModelWeights { } export interface MlPutTrainedModelAliasRequest extends RequestBase { -/** The alias to create or update. This value cannot end in numbers. */ + /** The alias to create or update. This value cannot end in numbers. */ model_alias: Name /** The identifier for the trained model that the alias refers to. */ model_id: Id - /** Specifies whether the alias gets reassigned to the specified trained model if it is already assigned to a different model. If the alias is already assigned and this parameter is false, the API returns an error. */ + /** Specifies whether the alias gets reassigned to the specified trained + * model if it is already assigned to a different model. If the alias is + * already assigned and this parameter is false, the API returns an error. */ reassign?: boolean /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { model_alias?: never, model_id?: never, reassign?: never } @@ -19105,9 +28116,10 @@ export interface MlPutTrainedModelAliasRequest extends RequestBase { export type MlPutTrainedModelAliasResponse = AcknowledgedResponseBase export interface MlPutTrainedModelDefinitionPartRequest extends RequestBase { -/** The unique identifier of the trained model. */ + /** The unique identifier of the trained model. */ model_id: Id - /** The definition part number. When the definition is loaded for inference the definition parts are streamed in the order of their part number. The first part must be `0` and the final part must be `total_parts - 1`. */ + /** The definition part number. When the definition is loaded for inference the definition parts are streamed in the + * order of their part number. The first part must be `0` and the final part must be `total_parts - 1`. */ part: integer /** The definition part for the model. Must be a base64 encoded string. */ definition: string @@ -19124,7 +28136,7 @@ export interface MlPutTrainedModelDefinitionPartRequest extends RequestBase { export type MlPutTrainedModelDefinitionPartResponse = AcknowledgedResponseBase export interface MlPutTrainedModelVocabularyRequest extends RequestBase { -/** The unique identifier of the trained model. */ + /** The unique identifier of the trained model. */ model_id: Id /** The model vocabulary, which must not be empty. */ vocabulary: string[] @@ -19141,11 +28153,14 @@ export interface MlPutTrainedModelVocabularyRequest extends RequestBase { export type MlPutTrainedModelVocabularyResponse = AcknowledgedResponseBase export interface MlResetJobRequest extends RequestBase { -/** The ID of the job to reset. */ + /** The ID of the job to reset. */ job_id: Id - /** Should this request wait until the operation has completed before returning. */ + /** Should this request wait until the operation has completed before + * returning. */ wait_for_completion?: boolean - /** Specifies whether annotations that have been added by the user should be deleted along with any auto-generated annotations when the job is reset. */ + /** Specifies whether annotations that have been added by the + * user should be deleted along with any auto-generated annotations when the job is + * reset. */ delete_user_annotations?: boolean /** All values in `body` will be added to the request body. 
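// Follow-up sketch (not part of the generated diff) for the definition-part, vocabulary, and
// alias requests documented above, applied to a model like the one created in the previous
// sketch. `base64Chunk`, `totalLength`, and all identifiers are assumed/hypothetical.
import { Client } from '@elastic/elasticsearch'

async function uploadModelArtifacts (client: Client, base64Chunk: string, totalLength: number) {
  await client.ml.putTrainedModelVocabulary({
    model_id: 'my-text-classifier',
    vocabulary: ['[PAD]', '[UNK]', 'elastic', 'search']   // illustrative tokens only
  })

  await client.ml.putTrainedModelDefinitionPart({
    model_id: 'my-text-classifier',
    part: 0,                              // the first part must be 0, the last total_parts - 1
    definition: base64Chunk,              // base64-encoded definition chunk
    total_definition_length: totalLength,
    total_parts: 1
  })

  await client.ml.putTrainedModelAlias({
    model_alias: 'my-text-classifier-current',  // aliases cannot end in numbers
    model_id: 'my-text-classifier',
    reassign: true                              // move the alias if it already points elsewhere
  })
}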
*/ body?: string | { [key: string]: any } & { job_id?: never, wait_for_completion?: never, delete_user_annotations?: never } @@ -19156,9 +28171,11 @@ export interface MlResetJobRequest extends RequestBase { } export type MlResetJobResponse = AcknowledgedResponseBase export interface MlRevertModelSnapshotRequest extends RequestBase { -/** Identifier for the anomaly detection job. */ + /** Identifier for the anomaly detection job. */ job_id: Id - /** You can specify `empty` as the . Reverting to the empty snapshot means the anomaly detection job starts learning a new model from scratch when it is started. */ + /** You can specify `empty` as the `snapshot_id`. Reverting to the empty + * snapshot means the anomaly detection job starts learning a new model from + * scratch when it is started. */ snapshot_id: Id /** Refer to the description for the `delete_intervening_results` query parameter. */ delete_intervening_results?: boolean @@ -19173,7 +28190,9 @@ export interface MlRevertModelSnapshotResponse { } export interface MlSetUpgradeModeRequest extends RequestBase { -/** When `true`, it enables `upgrade_mode` which temporarily halts all job and datafeed tasks and prohibits new job and datafeed tasks from starting. */ + /** When `true`, it enables `upgrade_mode` which temporarily halts all job + * and datafeed tasks and prohibits new job and datafeed tasks from + * starting. */ enabled?: boolean /** The time to wait for the request to be completed. */ timeout?: Duration @@ -19186,9 +28205,12 @@ export interface MlSetUpgradeModeRequest extends RequestBase { } export type MlSetUpgradeModeResponse = AcknowledgedResponseBase export interface MlStartDataFrameAnalyticsRequest extends RequestBase { -/** Identifier for the data frame analytics job. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. It must start and end with alphanumeric characters. */ + /** Identifier for the data frame analytics job. This identifier can contain + * lowercase alphanumeric characters (a-z and 0-9), hyphens, and + * underscores. It must start and end with alphanumeric characters. */ id: Id - /** Controls the amount of time to wait until the data frame analytics job starts. */ + /** Controls the amount of time to wait until the data frame analytics job + * starts. */ timeout?: Duration /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { id?: never, timeout?: never } @@ -19198,11 +28220,18 @@ export interface MlStartDataFrameAnalyticsResponse { acknowledged: boolean + /** The ID of the node that the job was started on. If the job is allowed to open + * lazily and has not yet been assigned to a node, this value is an empty string. + * In serverless, if the job has been assigned to run, the node ID is "serverless". */ node: NodeId } export interface MlStartDatafeedRequest extends RequestBase { -/** A numerical character string that uniquely identifies the datafeed. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. It must start and end with alphanumeric characters. */ + /** A numerical character string that uniquely identifies the datafeed. This identifier can contain lowercase + * alphanumeric characters (a-z and 0-9), hyphens, and underscores.
It must start and end with alphanumeric + * characters. */ datafeed_id: Id /** Refer to the description for the `end` query parameter. */ end?: DateTime @@ -19217,30 +28246,49 @@ export interface MlStartDatafeedRequest extends RequestBase { } export interface MlStartDatafeedResponse { + /** The ID of the node that the job was started on. In serverless this will be the "serverless". + * If the job is allowed to open lazily and has not yet been assigned to a node, this value is an empty string. */ node: NodeIds + /** For a successful response, this value is always `true`. On failure, an exception is returned instead. */ started: boolean } export interface MlStartTrainedModelDeploymentRequest extends RequestBase { -/** The unique identifier of the trained model. Currently, only PyTorch models are supported. */ + /** The unique identifier of the trained model. Currently, only PyTorch models are supported. */ model_id: Id - /** The inference cache size (in memory outside the JVM heap) per node for the model. The default value is the same size as the `model_size_bytes`. To disable the cache, `0b` can be provided. */ + /** The inference cache size (in memory outside the JVM heap) per node for the model. + * The default value is the same size as the `model_size_bytes`. To disable the cache, + * `0b` can be provided. */ cache_size?: ByteSize - /** A unique identifier for the deployment of the model. */ + /** A unique identifier for the deployment of the model. + * @remarks This property is not supported on Elastic Cloud Serverless. */ deployment_id?: string - /** The number of model allocations on each node where the model is deployed. All allocations on a node share the same copy of the model in memory but use a separate set of threads to evaluate the model. Increasing this value generally increases the throughput. If this setting is greater than the number of hardware threads it will automatically be changed to a value less than the number of hardware threads. If adaptive_allocations is enabled, do not set this value, because it’s automatically set. */ + /** The number of model allocations on each node where the model is deployed. + * All allocations on a node share the same copy of the model in memory but use + * a separate set of threads to evaluate the model. + * Increasing this value generally increases the throughput. + * If this setting is greater than the number of hardware threads + * it will automatically be changed to a value less than the number of hardware threads. + * If adaptive_allocations is enabled, do not set this value, because it’s automatically set. */ number_of_allocations?: integer /** The deployment priority. */ priority?: MlTrainingPriority - /** Specifies the number of inference requests that are allowed in the queue. After the number of requests exceeds this value, new requests are rejected with a 429 error. */ + /** Specifies the number of inference requests that are allowed in the queue. After the number of requests exceeds + * this value, new requests are rejected with a 429 error. */ queue_capacity?: integer - /** Sets the number of threads used by each model allocation during inference. This generally increases the inference speed. The inference process is a compute-bound process; any number greater than the number of available hardware threads on the machine does not increase the inference speed. If this setting is greater than the number of hardware threads it will automatically be changed to a value less than the number of hardware threads. 
*/ + /** Sets the number of threads used by each model allocation during inference. This generally increases + * the inference speed. The inference process is a compute-bound process; any number + * greater than the number of available hardware threads on the machine does not increase the + * inference speed. If this setting is greater than the number of hardware threads + * it will automatically be changed to a value less than the number of hardware threads. */ threads_per_allocation?: integer /** Specifies the amount of time to wait for the model to deploy. */ timeout?: Duration /** Specifies the allocation status to wait for before returning. */ wait_for?: MlDeploymentAllocationState - /** Adaptive allocations configuration. When enabled, the number of allocations is set based on the current load. If adaptive_allocations is enabled, do not set the number of allocations manually. */ + /** Adaptive allocations configuration. When enabled, the number of allocations + * is set based on the current load. + * If adaptive_allocations is enabled, do not set the number of allocations manually. */ adaptive_allocations?: MlAdaptiveAllocationsSettings /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { model_id?: never, cache_size?: never, deployment_id?: never, number_of_allocations?: never, priority?: never, queue_capacity?: never, threads_per_allocation?: never, timeout?: never, wait_for?: never, adaptive_allocations?: never } @@ -19253,13 +28301,26 @@ export interface MlStartTrainedModelDeploymentResponse { } export interface MlStopDataFrameAnalyticsRequest extends RequestBase { -/** Identifier for the data frame analytics job. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. It must start and end with alphanumeric characters. */ + /** Identifier for the data frame analytics job. This identifier can contain + * lowercase alphanumeric characters (a-z and 0-9), hyphens, and + * underscores. It must start and end with alphanumeric characters. */ id: Id - /** Specifies what to do when the request: 1. Contains wildcard expressions and there are no data frame analytics jobs that match. 2. Contains the _all string or no identifiers and there are no matches. 3. Contains wildcard expressions and there are only partial matches. The default value is true, which returns an empty data_frame_analytics array when there are no matches and the subset of results when there are partial matches. If this parameter is false, the request returns a 404 status code when there are no matches or only partial matches. */ + /** Specifies what to do when the request: + * + * 1. Contains wildcard expressions and there are no data frame analytics + * jobs that match. + * 2. Contains the _all string or no identifiers and there are no matches. + * 3. Contains wildcard expressions and there are only partial matches. + * + * The default value is true, which returns an empty data_frame_analytics + * array when there are no matches and the subset of results when there are + * partial matches. If this parameter is false, the request returns a 404 + * status code when there are no matches or only partial matches. */ allow_no_match?: boolean /** If true, the data frame analytics job is stopped forcefully. */ force?: boolean - /** Controls the amount of time to wait until the data frame analytics job stops. Defaults to 20 seconds. */ + /** Controls the amount of time to wait until the data frame analytics job + * stops. 
Defaults to 20 seconds. */ timeout?: Duration /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { id?: never, allow_no_match?: never, force?: never, timeout?: never } @@ -19272,7 +28333,9 @@ export interface MlStopDataFrameAnalyticsResponse { } export interface MlStopDatafeedRequest extends RequestBase { -/** Identifier for the datafeed. You can stop multiple datafeeds in a single API request by using a comma-separated list of datafeeds or a wildcard expression. You can close all datafeeds by using `_all` or by specifying `*` as the identifier. */ + /** Identifier for the datafeed. You can stop multiple datafeeds in a single API request by using a comma-separated + * list of datafeeds or a wildcard expression. You can close all datafeeds by using `_all` or by specifying `*` as + * the identifier. */ datafeed_id: Id /** Refer to the description for the `allow_no_match` query parameter. */ allow_no_match?: boolean @@ -19291,11 +28354,15 @@ export interface MlStopDatafeedResponse { } export interface MlStopTrainedModelDeploymentRequest extends RequestBase { -/** The unique identifier of the trained model. */ + /** The unique identifier of the trained model. */ model_id: Id - /** Specifies what to do when the request: contains wildcard expressions and there are no deployments that match; contains the `_all` string or no identifiers and there are no matches; or contains wildcard expressions and there are only partial matches. By default, it returns an empty array when there are no matches and the subset of results when there are partial matches. If `false`, the request returns a 404 status code when there are no matches or only partial matches. */ + /** Specifies what to do when the request: contains wildcard expressions and there are no deployments that match; + * contains the `_all` string or no identifiers and there are no matches; or contains wildcard expressions and + * there are only partial matches. By default, it returns an empty array when there are no matches and the subset of results when there are partial matches. + * If `false`, the request returns a 404 status code when there are no matches or only partial matches. */ allow_no_match?: boolean - /** Forcefully stops the deployment, even if it is used by ingest pipelines. You can't use these pipelines until you restart the model deployment. */ + /** Forcefully stops the deployment, even if it is used by ingest pipelines. You can't use these pipelines until you + * restart the model deployment. */ force?: boolean /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { model_id?: never, allow_no_match?: never, force?: never } @@ -19308,15 +28375,25 @@ export interface MlStopTrainedModelDeploymentResponse { } export interface MlUpdateDataFrameAnalyticsRequest extends RequestBase { -/** Identifier for the data frame analytics job. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. It must start and end with alphanumeric characters. */ + /** Identifier for the data frame analytics job. This identifier can contain + * lowercase alphanumeric characters (a-z and 0-9), hyphens, and + * underscores. It must start and end with alphanumeric characters. */ id: Id /** A description of the job. */ description?: string - /** The approximate maximum amount of memory resources that are permitted for analytical processing. 
If your `elasticsearch.yml` file contains an `xpack.ml.max_model_memory_limit` setting, an error occurs when you try to create data frame analytics jobs that have `model_memory_limit` values greater than that setting. */ + /** The approximate maximum amount of memory resources that are permitted for + * analytical processing. If your `elasticsearch.yml` file contains an + * `xpack.ml.max_model_memory_limit` setting, an error occurs when you try + * to create data frame analytics jobs that have `model_memory_limit` values + * greater than that setting. */ model_memory_limit?: string - /** The maximum number of threads to be used by the analysis. Using more threads may decrease the time necessary to complete the analysis at the cost of using more CPU. Note that the process may use additional threads for operational functionality other than the analysis itself. */ + /** The maximum number of threads to be used by the analysis. Using more + * threads may decrease the time necessary to complete the analysis at the + * cost of using more CPU. Note that the process may use additional threads + * for operational functionality other than the analysis itself. */ max_num_threads?: integer - /** Specifies whether this job can start when there is insufficient machine learning node capacity for it to be immediately assigned to a node. */ + /** Specifies whether this job can start when there is insufficient machine + * learning node capacity for it to be immediately assigned to a node. */ allow_lazy_start?: boolean /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { id?: never, description?: never, model_memory_limit?: never, max_num_threads?: never, allow_lazy_start?: never } @@ -19340,43 +28417,80 @@ export interface MlUpdateDataFrameAnalyticsResponse { } export interface MlUpdateDatafeedRequest extends RequestBase { -/** A numerical character string that uniquely identifies the datafeed. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. It must start and end with alphanumeric characters. */ + /** A numerical character string that uniquely identifies the datafeed. + * This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. + * It must start and end with alphanumeric characters. */ datafeed_id: Id - /** If `true`, wildcard indices expressions that resolve into no concrete indices are ignored. This includes the `_all` string or when no indices are specified. */ + /** If `true`, wildcard indices expressions that resolve into no concrete indices are ignored. This includes the + * `_all` string or when no indices are specified. */ allow_no_indices?: boolean - /** Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values. Valid values are: * `all`: Match any data stream or index, including hidden ones. * `closed`: Match closed, non-hidden indices. Also matches any non-hidden data stream. Data streams cannot be closed. * `hidden`: Match hidden data streams and hidden indices. Must be combined with `open`, `closed`, or both. * `none`: Wildcard patterns are not accepted. * `open`: Match open, non-hidden indices. Also matches any non-hidden data stream. */ + /** Type of index that wildcard patterns can match. 
If the request can target data streams, this argument determines + * whether wildcard expressions match hidden data streams. Supports comma-separated values. Valid values are: + * + * * `all`: Match any data stream or index, including hidden ones. + * * `closed`: Match closed, non-hidden indices. Also matches any non-hidden data stream. Data streams cannot be closed. + * * `hidden`: Match hidden data streams and hidden indices. Must be combined with `open`, `closed`, or both. + * * `none`: Wildcard patterns are not accepted. + * * `open`: Match open, non-hidden indices. Also matches any non-hidden data stream. */ expand_wildcards?: ExpandWildcards /** If `true`, concrete, expanded or aliased indices are ignored when frozen. */ ignore_throttled?: boolean /** If `true`, unavailable indices (missing or closed) are ignored. */ ignore_unavailable?: boolean - /** If set, the datafeed performs aggregation searches. Support for aggregations is limited and should be used only with low cardinality data. */ + /** If set, the datafeed performs aggregation searches. Support for aggregations is limited and should be used only + * with low cardinality data. */ aggregations?: Record - /** Datafeeds might search over long time periods, for several months or years. This search is split into time chunks in order to ensure the load on Elasticsearch is managed. Chunking configuration controls how the size of these time chunks are calculated; it is an advanced configuration option. */ + /** Datafeeds might search over long time periods, for several months or years. This search is split into time + * chunks in order to ensure the load on Elasticsearch is managed. Chunking configuration controls how the size of + * these time chunks are calculated; it is an advanced configuration option. */ chunking_config?: MlChunkingConfig - /** Specifies whether the datafeed checks for missing data and the size of the window. The datafeed can optionally search over indices that have already been read in an effort to determine whether any data has subsequently been added to the index. If missing data is found, it is a good indication that the `query_delay` is set too low and the data is being indexed after the datafeed has passed that moment in time. This check runs only on real-time datafeeds. */ + /** Specifies whether the datafeed checks for missing data and the size of the window. The datafeed can optionally + * search over indices that have already been read in an effort to determine whether any data has subsequently been + * added to the index. If missing data is found, it is a good indication that the `query_delay` is set too low and + * the data is being indexed after the datafeed has passed that moment in time. This check runs only on real-time + * datafeeds. */ delayed_data_check_config?: MlDelayedDataCheckConfig - /** The interval at which scheduled queries are made while the datafeed runs in real time. The default value is either the bucket span for short bucket spans, or, for longer bucket spans, a sensible fraction of the bucket span. When `frequency` is shorter than the bucket span, interim results for the last (partial) bucket are written then eventually overwritten by the full bucket results. If the datafeed uses aggregations, this value must be divisible by the interval of the date histogram aggregation. */ + /** The interval at which scheduled queries are made while the datafeed runs in real time. 
The default value is + * either the bucket span for short bucket spans, or, for longer bucket spans, a sensible fraction of the bucket + * span. When `frequency` is shorter than the bucket span, interim results for the last (partial) bucket are + * written then eventually overwritten by the full bucket results. If the datafeed uses aggregations, this value + * must be divisible by the interval of the date histogram aggregation. */ frequency?: Duration - /** An array of index names. Wildcards are supported. If any of the indices are in remote clusters, the machine learning nodes must have the `remote_cluster_client` role. */ + /** An array of index names. Wildcards are supported. If any of the indices are in remote clusters, the machine + * learning nodes must have the `remote_cluster_client` role. */ indices?: string[] - /** @alias indices */ - /** An array of index names. Wildcards are supported. If any of the indices are in remote clusters, the machine learning nodes must have the `remote_cluster_client` role. */ + /** An array of index names. Wildcards are supported. If any of the indices are in remote clusters, the machine + * learning nodes must have the `remote_cluster_client` role. + * @alias indices */ indexes?: string[] /** Specifies index expansion options that are used during search. */ indices_options?: IndicesOptions job_id?: Id - /** If a real-time datafeed has never seen any data (including during any initial training period), it automatically stops and closes the associated job after this many real-time searches return no documents. In other words, it stops after `frequency` times `max_empty_searches` of real-time operation. If not set, a datafeed with no end time that sees no data remains started until it is explicitly stopped. By default, it is not set. */ + /** If a real-time datafeed has never seen any data (including during any initial training period), it automatically + * stops and closes the associated job after this many real-time searches return no documents. In other words, + * it stops after `frequency` times `max_empty_searches` of real-time operation. If not set, a datafeed with no + * end time that sees no data remains started until it is explicitly stopped. By default, it is not set. */ max_empty_searches?: integer - /** The Elasticsearch query domain-specific language (DSL). This value corresponds to the query object in an Elasticsearch search POST body. All the options that are supported by Elasticsearch can be used, as this object is passed verbatim to Elasticsearch. Note that if you change the query, the analyzed data is also changed. Therefore, the time required to learn might be long and the understandability of the results is unpredictable. If you want to make significant changes to the source data, it is recommended that you clone the job and datafeed and make the amendments in the clone. Let both run in parallel and close one when you are satisfied with the results of the job. */ + /** The Elasticsearch query domain-specific language (DSL). This value corresponds to the query object in an + * Elasticsearch search POST body. All the options that are supported by Elasticsearch can be used, as this + * object is passed verbatim to Elasticsearch. Note that if you change the query, the analyzed data is also + * changed. Therefore, the time required to learn might be long and the understandability of the results is + * unpredictable. 
If you want to make significant changes to the source data, it is recommended that you + * clone the job and datafeed and make the amendments in the clone. Let both run in parallel and close one + * when you are satisfied with the results of the job. */ query?: QueryDslQueryContainer - /** The number of seconds behind real time that data is queried. For example, if data from 10:04 a.m. might not be searchable in Elasticsearch until 10:06 a.m., set this property to 120 seconds. The default value is randomly selected between `60s` and `120s`. This randomness improves the query performance when there are multiple jobs running on the same node. */ + /** The number of seconds behind real time that data is queried. For example, if data from 10:04 a.m. might + * not be searchable in Elasticsearch until 10:06 a.m., set this property to 120 seconds. The default + * value is randomly selected between `60s` and `120s`. This randomness improves the query performance + * when there are multiple jobs running on the same node. */ query_delay?: Duration /** Specifies runtime fields for the datafeed search. */ runtime_mappings?: MappingRuntimeFields - /** Specifies scripts that evaluate custom expressions and returns script fields to the datafeed. The detector configuration objects in a job can contain functions that use these script fields. */ + /** Specifies scripts that evaluate custom expressions and returns script fields to the datafeed. + * The detector configuration objects in a job can contain functions that use these script fields. */ script_fields?: Record - /** The size parameter that is used in Elasticsearch searches when the datafeed does not use aggregations. The maximum value is the value of `index.max_result_window`. */ + /** The size parameter that is used in Elasticsearch searches when the datafeed does not use aggregations. + * The maximum value is the value of `index.max_result_window`. */ scroll_size?: integer /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { datafeed_id?: never, allow_no_indices?: never, expand_wildcards?: never, ignore_throttled?: never, ignore_unavailable?: never, aggregations?: never, chunking_config?: never, delayed_data_check_config?: never, frequency?: never, indices?: never, indexes?: never, indices_options?: never, job_id?: never, max_empty_searches?: never, query?: never, query_delay?: never, runtime_mappings?: never, script_fields?: never, scroll_size?: never } @@ -19403,7 +28517,7 @@ export interface MlUpdateDatafeedResponse { } export interface MlUpdateFilterRequest extends RequestBase { -/** A string that uniquely identifies a filter. */ + /** A string that uniquely identifies a filter. */ filter_id: Id /** The items to add to the filter. */ add_items?: string[] @@ -19424,27 +28538,61 @@ export interface MlUpdateFilterResponse { } export interface MlUpdateJobRequest extends RequestBase { -/** Identifier for the job. */ + /** Identifier for the job. */ job_id: Id - /** Advanced configuration option. Specifies whether this job can open when there is insufficient machine learning node capacity for it to be immediately assigned to a node. If `false` and a machine learning node with capacity to run the job cannot immediately be found, the open anomaly detection jobs API returns an error. However, this is also subject to the cluster-wide `xpack.ml.max_lazy_ml_nodes` setting. 
If this option is set to `true`, the open anomaly detection jobs API does not return an error and the job waits in the opening state until sufficient machine learning node capacity is available. */ + /** Advanced configuration option. Specifies whether this job can open when + * there is insufficient machine learning node capacity for it to be + * immediately assigned to a node. If `false` and a machine learning node + * with capacity to run the job cannot immediately be found, the open + * anomaly detection jobs API returns an error. However, this is also + * subject to the cluster-wide `xpack.ml.max_lazy_ml_nodes` setting. If this + * option is set to `true`, the open anomaly detection jobs API does not + * return an error and the job waits in the opening state until sufficient + * machine learning node capacity is available. */ allow_lazy_open?: boolean analysis_limits?: MlAnalysisMemoryLimit - /** Advanced configuration option. The time between each periodic persistence of the model. The default value is a randomized value between 3 to 4 hours, which avoids all jobs persisting at exactly the same time. The smallest allowed value is 1 hour. For very large models (several GB), persistence could take 10-20 minutes, so do not set the value too low. If the job is open when you make the update, you must stop the datafeed, close the job, then reopen the job and restart the datafeed for the changes to take effect. */ + /** Advanced configuration option. The time between each periodic persistence + * of the model. + * The default value is a randomized value between 3 to 4 hours, which + * avoids all jobs persisting at exactly the same time. The smallest allowed + * value is 1 hour. + * For very large models (several GB), persistence could take 10-20 minutes, + * so do not set the value too low. + * If the job is open when you make the update, you must stop the datafeed, + * close the job, then reopen the job and restart the datafeed for the + * changes to take effect. */ background_persist_interval?: Duration - /** Advanced configuration option. Contains custom meta data about the job. For example, it can contain custom URL information as shown in Adding custom URLs to machine learning results. */ + /** Advanced configuration option. Contains custom meta data about the job. + * For example, it can contain custom URL information as shown in Adding + * custom URLs to machine learning results. */ custom_settings?: Record categorization_filters?: string[] /** A description of the job. */ description?: string model_plot_config?: MlModelPlotConfig model_prune_window?: Duration - /** Advanced configuration option, which affects the automatic removal of old model snapshots for this job. It specifies a period of time (in days) after which only the first snapshot per day is retained. This period is relative to the timestamp of the most recent snapshot for this job. Valid values range from 0 to `model_snapshot_retention_days`. For jobs created before version 7.8.0, the default value matches `model_snapshot_retention_days`. */ + /** Advanced configuration option, which affects the automatic removal of old + * model snapshots for this job. It specifies a period of time (in days) + * after which only the first snapshot per day is retained. This period is + * relative to the timestamp of the most recent snapshot for this job. Valid + * values range from 0 to `model_snapshot_retention_days`. For jobs created + * before version 7.8.0, the default value matches + * `model_snapshot_retention_days`. 
*/ daily_model_snapshot_retention_after_days?: long - /** Advanced configuration option, which affects the automatic removal of old model snapshots for this job. It specifies the maximum period of time (in days) that snapshots are retained. This period is relative to the timestamp of the most recent snapshot for this job. */ + /** Advanced configuration option, which affects the automatic removal of old + * model snapshots for this job. It specifies the maximum period of time (in + * days) that snapshots are retained. This period is relative to the + * timestamp of the most recent snapshot for this job. */ model_snapshot_retention_days?: long - /** Advanced configuration option. The period over which adjustments to the score are applied, as new data is seen. */ + /** Advanced configuration option. The period over which adjustments to the + * score are applied, as new data is seen. */ renormalization_window_days?: long - /** Advanced configuration option. The period of time (in days) that results are retained. Age is calculated relative to the timestamp of the latest bucket result. If this property has a non-null value, once per day at 00:30 (server time), results that are the specified number of days older than the latest bucket result are deleted from Elasticsearch. The default value is null, which means all results are retained. */ + /** Advanced configuration option. The period of time (in days) that results + * are retained. Age is calculated relative to the timestamp of the latest + * bucket result. If this property has a non-null value, once per day at + * 00:30 (server time), results that are the specified number of days older + * than the latest bucket result are deleted from Elasticsearch. The default + * value is null, which means all results are retained. */ results_retention_days?: long /** A list of job groups. A job can belong to no groups or many. */ groups?: string[] @@ -19483,13 +28631,15 @@ export interface MlUpdateJobResponse { } export interface MlUpdateModelSnapshotRequest extends RequestBase { -/** Identifier for the anomaly detection job. */ + /** Identifier for the anomaly detection job. */ job_id: Id /** Identifier for the model snapshot. */ snapshot_id: Id /** A description of the model snapshot. */ description?: string - /** If `true`, this snapshot will not be deleted during automatic cleanup of snapshots older than `model_snapshot_retention_days`. However, this snapshot will be deleted when the job is deleted. */ + /** If `true`, this snapshot will not be deleted during automatic cleanup of + * snapshots older than `model_snapshot_retention_days`. However, this + * snapshot will be deleted when the job is deleted. */ retain?: boolean /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { job_id?: never, snapshot_id?: never, description?: never, retain?: never } @@ -19503,11 +28653,19 @@ export interface MlUpdateModelSnapshotResponse { } export interface MlUpdateTrainedModelDeploymentRequest extends RequestBase { -/** The unique identifier of the trained model. Currently, only PyTorch models are supported. */ + /** The unique identifier of the trained model. Currently, only PyTorch models are supported. */ model_id: Id - /** The number of model allocations on each node where the model is deployed. All allocations on a node share the same copy of the model in memory but use a separate set of threads to evaluate the model. Increasing this value generally increases the throughput. 
If this setting is greater than the number of hardware threads it will automatically be changed to a value less than the number of hardware threads. If adaptive_allocations is enabled, do not set this value, because it’s automatically set. */ + /** The number of model allocations on each node where the model is deployed. + * All allocations on a node share the same copy of the model in memory but use + * a separate set of threads to evaluate the model. + * Increasing this value generally increases the throughput. + * If this setting is greater than the number of hardware threads + * it will automatically be changed to a value less than the number of hardware threads. + * If adaptive_allocations is enabled, do not set this value, because it’s automatically set. */ number_of_allocations?: integer - /** Adaptive allocations configuration. When enabled, the number of allocations is set based on the current load. If adaptive_allocations is enabled, do not set the number of allocations manually. */ + /** Adaptive allocations configuration. When enabled, the number of allocations + * is set based on the current load. + * If adaptive_allocations is enabled, do not set the number of allocations manually. */ adaptive_allocations?: MlAdaptiveAllocationsSettings /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { model_id?: never, number_of_allocations?: never, adaptive_allocations?: never } @@ -19520,11 +28678,12 @@ export interface MlUpdateTrainedModelDeploymentResponse { } export interface MlUpgradeJobSnapshotRequest extends RequestBase { -/** Identifier for the anomaly detection job. */ + /** Identifier for the anomaly detection job. */ job_id: Id /** A numerical character string that uniquely identifies the model snapshot. */ snapshot_id: Id - /** When true, the API won’t respond until the upgrade is complete. Otherwise, it responds as soon as the upgrade task is assigned to a node. */ + /** When true, the API won’t respond until the upgrade is complete. + * Otherwise, it responds as soon as the upgrade task is assigned to a node. */ wait_for_completion?: boolean /** Controls the time to wait for the request to complete. */ timeout?: Duration @@ -19535,7 +28694,9 @@ export interface MlUpgradeJobSnapshotRequest extends RequestBase { } export interface MlUpgradeJobSnapshotResponse { + /** The ID of the node that the upgrade task was started on if it is still running. In serverless this will be the "serverless". */ node: NodeId + /** When true, this means the task is complete. When false, it is still running. */ completed: boolean } @@ -19568,7 +28729,7 @@ export interface MlValidateDetectorRequest extends RequestBase { export type MlValidateDetectorResponse = AcknowledgedResponseBase export interface MonitoringBulkRequest extends RequestBase { -/** Default document type for items which don't provide one */ + /** Default document type for items which don't provide one */ type?: string /** Identifier of the monitored system */ system_id: string @@ -19585,66 +28746,110 @@ export interface MonitoringBulkRequest } export interface NodesCgroupMemory { + /** The `memory` control group to which the Elasticsearch process belongs. */ control_group?: string + /** The maximum amount of user memory (including file cache) allowed for all tasks in the same cgroup as the Elasticsearch process. 
+ * This value can be too big to store in a `long`, so is returned as a string so that the value returned can exactly match what the underlying operating system interface returns. + * Any value that is too large to parse into a `long` almost certainly means no limit has been set for the cgroup. */ limit_in_bytes?: string + /** The total current memory usage by processes in the cgroup, in bytes, by all tasks in the same cgroup as the Elasticsearch process. + * This value is stored as a string for consistency with `limit_in_bytes`. */ usage_in_bytes?: string } export interface NodesClient { + /** Unique ID for the HTTP client. */ id?: long + /** Reported agent for the HTTP client. + * If unavailable, this property is not included in the response. */ agent?: string + /** Local address for the HTTP connection. */ local_address?: string + /** Remote address for the HTTP connection. */ remote_address?: string + /** The URI of the client’s most recent request. */ last_uri?: string + /** Time at which the client opened the connection. */ opened_time_millis?: long + /** Time at which the client closed the connection if the connection is closed. */ closed_time_millis?: long + /** Time of the most recent request from this client. */ last_request_time_millis?: long + /** Number of requests from this client. */ request_count?: long + /** Cumulative size in bytes of all requests from this client. */ request_size_bytes?: long + /** Value from the client’s `x-opaque-id` HTTP header. + * If unavailable, this property is not included in the response. */ x_opaque_id?: string } @@ -19653,26 +28858,48 @@ export interface NodesClusterAppliedStats { } export interface NodesClusterStateQueue { + /** Total number of cluster states in queue. */ total?: long + /** Number of pending cluster states in queue. */ pending?: long + /** Number of committed cluster states in queue. */ committed?: long } export interface NodesClusterStateUpdate { + /** The number of cluster state update attempts that did not change the cluster state since the node started. */ count: long + /** The cumulative amount of time spent computing no-op cluster state updates since the node started. */ computation_time?: Duration + /** The cumulative amount of time, in milliseconds, spent computing no-op cluster state updates since the node started. */ computation_time_millis?: DurationValue + /** The cumulative amount of time spent publishing cluster state updates which ultimately succeeded, which includes everything from the start of the publication (just after the computation of the new cluster state) until the publication has finished and the master node is ready to start processing the next state update. + * This includes the time measured by `context_construction_time`, `commit_time`, `completion_time` and `master_apply_time`. */ publication_time?: Duration + /** The cumulative amount of time, in milliseconds, spent publishing cluster state updates which ultimately succeeded, which includes everything from the start of the publication (just after the computation of the new cluster state) until the publication has finished and the master node is ready to start processing the next state update. + * This includes the time measured by `context_construction_time`, `commit_time`, `completion_time` and `master_apply_time`. */ publication_time_millis?: DurationValue + /** The cumulative amount of time spent constructing a publication context since the node started for publications that ultimately succeeded. 
+ * This statistic includes the time spent computing the difference between the current and new cluster state and preparing a serialized representation of this difference. */ context_construction_time?: Duration + /** The cumulative amount of time, in milliseconds, spent constructing a publication context since the node started for publications that ultimately succeeded. + * This statistic includes the time spent computing the difference between the current and new cluster state and preparing a serialized representation of this difference. */ context_construction_time_millis?: DurationValue + /** The cumulative amount of time spent waiting for a successful cluster state update to commit, which measures the time from the start of each publication until a majority of the master-eligible nodes have written the state to disk and confirmed the write to the elected master. */ commit_time?: Duration + /** The cumulative amount of time, in milliseconds, spent waiting for a successful cluster state update to commit, which measures the time from the start of each publication until a majority of the master-eligible nodes have written the state to disk and confirmed the write to the elected master. */ commit_time_millis?: DurationValue + /** The cumulative amount of time spent waiting for a successful cluster state update to complete, which measures the time from the start of each publication until all the other nodes have notified the elected master that they have applied the cluster state. */ completion_time?: Duration + /** The cumulative amount of time, in milliseconds, spent waiting for a successful cluster state update to complete, which measures the time from the start of each publication until all the other nodes have notified the elected master that they have applied the cluster state. */ completion_time_millis?: DurationValue + /** The cumulative amount of time spent successfully applying cluster state updates on the elected master since the node started. */ master_apply_time?: Duration + /** The cumulative amount of time, in milliseconds, spent successfully applying cluster state updates on the elected master since the node started. */ master_apply_time_millis?: DurationValue + /** The cumulative amount of time spent notifying listeners of a no-op cluster state update since the node started. */ notification_time?: Duration + /** The cumulative amount of time, in milliseconds, spent notifying listeners of a no-op cluster state update since the node started. */ notification_time_millis?: DurationValue } @@ -19695,12 +28922,16 @@ export interface NodesCpu { } export interface NodesCpuAcct { + /** The `cpuacct` control group to which the Elasticsearch process belongs. */ control_group?: string + /** The total CPU time, in nanoseconds, consumed by all tasks in the same cgroup as the Elasticsearch process. */ usage_nanos?: DurationValue } export interface NodesDataPathStats { + /** Total amount of disk space available to this Java virtual machine on this file store. */ available?: string + /** Total number of bytes available to this Java virtual machine on this file store. */ available_in_bytes?: long disk_queue?: string disk_reads?: long @@ -19709,58 +28940,98 @@ export interface NodesDataPathStats { disk_writes?: long disk_write_size?: string disk_write_size_in_bytes?: long + /** Total amount of unallocated disk space in the file store. */ free?: string + /** Total number of unallocated bytes in the file store. */ free_in_bytes?: long + /** Mount point of the file store (for example: `/dev/sda2`).
*/ mount?: string + /** Path to the file store. */ path?: string + /** Total size of the file store. */ total?: string + /** Total size of the file store in bytes. */ total_in_bytes?: long + /** Type of the file store (ex: ext4). */ type?: string } export interface NodesDiscovery { + /** Contains statistics for the cluster state queue of the node. */ cluster_state_queue?: NodesClusterStateQueue + /** Contains statistics for the published cluster states of the node. */ published_cluster_states?: NodesPublishedClusterStates + /** Contains low-level statistics about how long various activities took during cluster state updates while the node was the elected master. + * Omitted if the node is not master-eligible. + * Every field whose name ends in `_time` within this object is also represented as a raw number of milliseconds in a field whose name ends in `_time_millis`. + * The human-readable fields with a `_time` suffix are only returned if requested with the `?human=true` query parameter. */ cluster_state_update?: Record serialized_cluster_states?: NodesSerializedClusterState cluster_applier_stats?: NodesClusterAppliedStats } export interface NodesExtendedMemoryStats extends NodesMemoryStats { + /** Percentage of free memory. */ free_percent?: integer + /** Percentage of used memory. */ used_percent?: integer } export interface NodesFileSystem { + /** List of all file stores. */ data?: NodesDataPathStats[] + /** Last time the file stores statistics were refreshed. + * Recorded in milliseconds since the Unix Epoch. */ timestamp?: long + /** Contains statistics for all file stores of the node. */ total?: NodesFileSystemTotal + /** Contains I/O statistics for the node. */ io_stats?: NodesIoStats } export interface NodesFileSystemTotal { + /** Total disk space available to this Java virtual machine on all file stores. + * Depending on OS or process level restrictions, this might appear less than `free`. + * This is the actual amount of free disk space the Elasticsearch node can utilise. */ available?: string + /** Total number of bytes available to this Java virtual machine on all file stores. + * Depending on OS or process level restrictions, this might appear less than `free_in_bytes`. + * This is the actual amount of free disk space the Elasticsearch node can utilise. */ available_in_bytes?: long + /** Total unallocated disk space in all file stores. */ free?: string + /** Total number of unallocated bytes in all file stores. */ free_in_bytes?: long + /** Total size of all file stores. */ total?: string + /** Total size of all file stores in bytes. */ total_in_bytes?: long } export interface NodesGarbageCollector { + /** Contains statistics about JVM garbage collectors for the node. */ collectors?: Record } export interface NodesGarbageCollectorTotal { + /** Total number of JVM garbage collectors that collect objects. */ collection_count?: long + /** Total time spent by JVM collecting objects. */ collection_time?: string + /** Total time, in milliseconds, spent by JVM collecting objects. */ collection_time_in_millis?: long } export interface NodesHttp { + /** Current number of open HTTP connections for the node. */ current_open?: integer + /** Total number of HTTP connections opened for the node. */ total_opened?: long + /** Information on current and recently-closed HTTP client connections. + * Clients that have been closed longer than the `http.client_stats.closed_channels.max_age` setting will not be represented here. 
*/ clients?: NodesClient[] + /** Detailed HTTP stats broken down by route + * @remarks This property is not supported on Elastic Cloud Serverless. */ routes: Record } @@ -19783,81 +29054,136 @@ export interface NodesHttpRouteResponses { } export interface NodesIndexingPressure { + /** Contains statistics for memory consumption from indexing load. */ memory?: NodesIndexingPressureMemory } export interface NodesIndexingPressureMemory { + /** Configured memory limit for the indexing requests. + * Replica requests have an automatic limit that is 1.5x this value. */ limit?: ByteSize + /** Configured memory limit, in bytes, for the indexing requests. + * Replica requests have an automatic limit that is 1.5x this value. */ limit_in_bytes?: long + /** Contains statistics for current indexing load. */ current?: NodesPressureMemory + /** Contains statistics for the cumulative indexing load since the node started. */ total?: NodesPressureMemory } export interface NodesIngest { + /** Contains statistics about ingest pipelines for the node. */ pipelines?: Record + /** Contains statistics about ingest operations for the node. */ total?: NodesIngestTotal } export interface NodesIngestStats { + /** Total number of documents ingested during the lifetime of this node. */ count: long + /** Total number of documents currently being ingested. */ current: long + /** Total number of failed ingest operations during the lifetime of this node. */ failed: long + /** Total number of ingest processors. */ processors: Record[] + /** Total time, in milliseconds, spent preprocessing ingest documents during the lifetime of this node. */ time_in_millis: DurationValue + /** Total number of bytes of all documents ingested by the pipeline. + * This field is only present on pipelines which are the first to process a document. + * Thus, it is not present on pipelines which only serve as a final pipeline after a default pipeline, a pipeline run after a reroute processor, or pipelines in pipeline processors. */ ingested_as_first_pipeline_in_bytes: long + /** Total number of bytes of all documents produced by the pipeline. + * This field is only present on pipelines which are the first to process a document. + * Thus, it is not present on pipelines which only serve as a final pipeline after a default pipeline, a pipeline run after a reroute processor, or pipelines in pipeline processors. + * In situations where there are subsequent pipelines, the value represents the size of the document after all pipelines have run. */ produced_as_first_pipeline_in_bytes: long } export interface NodesIngestTotal { + /** Total number of documents ingested during the lifetime of this node. */ count: long + /** Total number of documents currently being ingested. */ current: long + /** Total number of failed ingest operations during the lifetime of this node. */ failed: long + /** Total time, in milliseconds, spent preprocessing ingest documents during the lifetime of this node. */ time_in_millis: DurationValue } export interface NodesIoStatDevice { + /** The Linux device name. */ device_name?: string + /** The total number of read and write operations for the device completed since starting Elasticsearch. */ operations?: long + /** The total number of kilobytes read for the device since starting Elasticsearch. */ read_kilobytes?: long + /** The total number of read operations for the device completed since starting Elasticsearch. */ read_operations?: long + /** The total number of kilobytes written for the device since starting Elasticsearch. 
*/ write_kilobytes?: long + /** The total number of write operations for the device completed since starting Elasticsearch. */ write_operations?: long } export interface NodesIoStats { + /** Array of disk metrics for each device that is backing an Elasticsearch data path. + * These disk metrics are probed periodically and averages between the last probe and the current probe are computed. */ devices?: NodesIoStatDevice[] + /** The sum of the disk metrics for all devices that back an Elasticsearch data path. */ total?: NodesIoStatDevice } export interface NodesJvm { + /** Contains statistics about JVM buffer pools for the node. */ buffer_pools?: Record + /** Contains statistics about classes loaded by JVM for the node. */ classes?: NodesJvmClasses + /** Contains statistics about JVM garbage collectors for the node. */ gc?: NodesGarbageCollector + /** Contains JVM memory usage statistics for the node. */ mem?: NodesJvmMemoryStats + /** Contains statistics about JVM thread usage for the node. */ threads?: NodesJvmThreads + /** Last time JVM statistics were refreshed. */ timestamp?: long + /** Human-readable JVM uptime. + * Only returned if the `human` query parameter is `true`. */ uptime?: string + /** JVM uptime in milliseconds. */ uptime_in_millis?: long } export interface NodesJvmClasses { + /** Number of classes currently loaded by JVM. */ current_loaded_count?: long + /** Total number of classes loaded since the JVM started. */ total_loaded_count?: long + /** Total number of classes unloaded since the JVM started. */ total_unloaded_count?: long } export interface NodesJvmMemoryStats { + /** Memory, in bytes, currently in use by the heap. */ heap_used_in_bytes?: long + /** Percentage of memory currently in use by the heap. */ heap_used_percent?: long + /** Amount of memory, in bytes, available for use by the heap. */ heap_committed_in_bytes?: long + /** Maximum amount of memory, in bytes, available for use by the heap. */ heap_max_in_bytes?: long + /** Non-heap memory used, in bytes. */ non_heap_used_in_bytes?: long + /** Amount of non-heap memory available, in bytes. */ non_heap_committed_in_bytes?: long + /** Contains statistics about heap memory usage for the node. */ pools?: Record } export interface NodesJvmThreads { + /** Number of active threads in use by JVM. */ count?: long + /** Highest number of threads used by JVM. */ peak_count?: long } @@ -19867,6 +29193,8 @@ export interface NodesKeyedProcessor { } export interface NodesMemoryStats { + /** If the amount of physical memory has been overridden using the `es`.`total_memory_bytes` system property then this reports the overridden value in bytes. + * Otherwise it reports the same value as `total_in_bytes`. */ adjusted_total_in_bytes?: long resident?: string resident_in_bytes?: long @@ -19874,27 +29202,34 @@ export interface NodesMemoryStats { share_in_bytes?: long total_virtual?: string total_virtual_in_bytes?: long + /** Total amount of physical memory in bytes. */ total_in_bytes?: long + /** Amount of free physical memory in bytes. */ free_in_bytes?: long + /** Amount of used physical memory in bytes. */ used_in_bytes?: long } export interface NodesNodeBufferPool { + /** Number of buffer pools. */ count?: long + /** Total capacity of buffer pools. */ total_capacity?: string + /** Total capacity of buffer pools in bytes. */ total_capacity_in_bytes?: long + /** Size of buffer pools. */ used?: string + /** Size of buffer pools in bytes. 
*/ used_in_bytes?: long } -export interface NodesNodeReloadError { +export interface NodesNodeReloadResult { name: Name reload_exception?: ErrorCause } -export type NodesNodeReloadResult = NodesStats | NodesNodeReloadError - export interface NodesNodesResponseBase { + /** Contains statistics about the number of nodes selected by the request’s node filters. */ _nodes?: NodeStatistics } @@ -19907,46 +29242,78 @@ export interface NodesOperatingSystem { } export interface NodesPool { + /** Memory, in bytes, used by the heap. */ used_in_bytes?: long + /** Maximum amount of memory, in bytes, available for use by the heap. */ max_in_bytes?: long + /** Largest amount of memory, in bytes, historically used by the heap. */ peak_used_in_bytes?: long + /** Largest amount of memory, in bytes, historically used by the heap. */ peak_max_in_bytes?: long } export interface NodesPressureMemory { + /** Memory consumed by indexing requests in the coordinating, primary, or replica stage. */ all?: ByteSize + /** Memory consumed, in bytes, by indexing requests in the coordinating, primary, or replica stage. */ all_in_bytes?: long + /** Memory consumed by indexing requests in the coordinating or primary stage. + * This value is not the sum of coordinating and primary as a node can reuse the coordinating memory if the primary stage is executed locally. */ combined_coordinating_and_primary?: ByteSize + /** Memory consumed, in bytes, by indexing requests in the coordinating or primary stage. + * This value is not the sum of coordinating and primary as a node can reuse the coordinating memory if the primary stage is executed locally. */ combined_coordinating_and_primary_in_bytes?: long + /** Memory consumed by indexing requests in the coordinating stage. */ coordinating?: ByteSize + /** Memory consumed, in bytes, by indexing requests in the coordinating stage. */ coordinating_in_bytes?: long + /** Memory consumed by indexing requests in the primary stage. */ primary?: ByteSize + /** Memory consumed, in bytes, by indexing requests in the primary stage. */ primary_in_bytes?: long + /** Memory consumed by indexing requests in the replica stage. */ replica?: ByteSize + /** Memory consumed, in bytes, by indexing requests in the replica stage. */ replica_in_bytes?: long + /** Number of indexing requests rejected in the coordinating stage. */ coordinating_rejections?: long + /** Number of indexing requests rejected in the primary stage. */ primary_rejections?: long + /** Number of indexing requests rejected in the replica stage. */ replica_rejections?: long } export interface NodesProcess { + /** Contains CPU statistics for the node. */ cpu?: NodesCpu + /** Contains virtual memory statistics for the node. */ mem?: NodesMemoryStats + /** Number of opened file descriptors associated with the current process, or `-1` if not supported. */ open_file_descriptors?: integer + /** Maximum number of file descriptors allowed on the system, or `-1` if not supported. */ max_file_descriptors?: integer + /** Last time the statistics were refreshed. + * Recorded in milliseconds since the Unix Epoch. */ timestamp?: long } export interface NodesProcessor { + /** Number of documents transformed by the processor. */ count?: long + /** Number of documents currently being transformed by the processor. */ current?: long + /** Number of failed operations for the processor. */ failed?: long + /** Time, in milliseconds, spent by the processor transforming documents.
*/ time_in_millis?: DurationValue } export interface NodesPublishedClusterStates { + /** Number of published cluster states. */ full_states?: long + /** Number of incompatible differences between published cluster states. */ incompatible_diffs?: long + /** Number of compatible differences between published cluster states. */ compatible_diffs?: long } @@ -19959,52 +29326,89 @@ export interface NodesRecording { export interface NodesRepositoryLocation { base_path: string + /** Container name (Azure) */ container?: string + /** Bucket name (GCP, S3) */ bucket?: string } export interface NodesRepositoryMeteringInformation { + /** Repository name. */ repository_name: Name + /** Repository type. */ repository_type: string + /** Represents an unique location within the repository. */ repository_location: NodesRepositoryLocation + /** An identifier that changes every time the repository is updated. */ repository_ephemeral_id: Id + /** Time the repository was created or updated. Recorded in milliseconds since the Unix Epoch. */ repository_started_at: EpochTime + /** Time the repository was deleted or updated. Recorded in milliseconds since the Unix Epoch. */ repository_stopped_at?: EpochTime + /** A flag that tells whether or not this object has been archived. When a repository is closed or updated the + * repository metering information is archived and kept for a certain period of time. This allows retrieving the + * repository metering information of previous repository instantiations. */ archived: boolean + /** The cluster state version when this object was archived, this field can be used as a logical timestamp to delete + * all the archived metrics up to an observed version. This field is only present for archived repository metering + * information objects. The main purpose of this field is to avoid possible race conditions during repository metering + * information deletions, i.e. deleting archived repositories metering information that we haven’t observed yet. */ cluster_version?: VersionNumber + /** An object with the number of request performed against the repository grouped by request type. */ request_counts: NodesRequestCounts } export interface NodesRequestCounts { + /** Number of Get Blob Properties requests (Azure) */ GetBlobProperties?: long + /** Number of Get Blob requests (Azure) */ GetBlob?: long + /** Number of List Blobs requests (Azure) */ ListBlobs?: long + /** Number of Put Blob requests (Azure) */ PutBlob?: long + /** Number of Put Block (Azure) */ PutBlock?: long + /** Number of Put Block List requests */ PutBlockList?: long + /** Number of get object requests (GCP, S3) */ GetObject?: long + /** Number of list objects requests (GCP, S3) */ ListObjects?: long + /** Number of insert object requests, including simple, multipart and resumable uploads. Resumable uploads + * can perform multiple http requests to insert a single object but they are considered as a single request + * since they are billed as an individual operation. (GCP) */ InsertObject?: long + /** Number of PutObject requests (S3) */ PutObject?: long + /** Number of Multipart requests, including CreateMultipartUpload, UploadPart and CompleteMultipartUpload requests (S3) */ PutMultipartObject?: long } export interface NodesScriptCache { + /** Total number of times the script cache has evicted old data. */ cache_evictions?: long + /** Total number of times the script compilation circuit breaker has limited inline script compilations. 
*/ compilation_limit_triggered?: long + /** Total number of inline script compilations performed by the node. */ compilations?: long context?: string } export interface NodesScripting { + /** Total number of times the script cache has evicted old data. */ cache_evictions?: long + /** Total number of inline script compilations performed by the node. */ compilations?: long + /** Contains this recent history of script compilations. */ compilations_history?: Record + /** Total number of times the script compilation circuit breaker has limited inline script compilations. */ compilation_limit_triggered?: long contexts?: NodesContext[] } export interface NodesSerializedClusterState { + /** Number of published cluster states. */ full_states?: NodesSerializedClusterStateDetail diffs?: NodesSerializedClusterStateDetail } @@ -20024,36 +29428,63 @@ export interface NodesSizeHttpHistogram { } export interface NodesStats { + /** Statistics about adaptive replica selection. */ adaptive_selection?: Record + /** Statistics about the field data circuit breaker. */ breakers?: Record + /** File system information, data path, free disk space, read/write stats. */ fs?: NodesFileSystem + /** Network host for the node, based on the network host setting. */ host?: Host + /** HTTP connection information. */ http?: NodesHttp + /** Statistics about ingest preprocessing. */ ingest?: NodesIngest + /** IP address and port for the node. */ ip?: Ip | Ip[] + /** JVM stats, memory pool information, garbage collection, buffer pools, number of loaded/unloaded classes. */ jvm?: NodesJvm + /** Human-readable identifier for the node. + * Based on the node name setting. */ name?: Name + /** Operating system stats, load average, mem, swap. */ os?: NodesOperatingSystem + /** Process statistics, memory consumption, cpu usage, open file descriptors. */ process?: NodesProcess + /** Roles assigned to the node. */ roles?: NodeRoles + /** Contains script statistics for the node. */ script?: NodesScripting script_cache?: Record + /** Statistics about each thread pool, including current size, queue and rejected tasks. */ thread_pool?: Record timestamp?: long + /** Transport statistics about sent and received bytes in cluster communication. */ transport?: NodesTransport + /** Host and port for the transport layer, used for internal communication between nodes in a cluster. */ transport_address?: TransportAddress + /** Contains a list of attributes for the node. */ attributes?: Record + /** Contains node discovery statistics for the node. */ discovery?: NodesDiscovery + /** Contains indexing pressure statistics for the node. */ indexing_pressure?: NodesIndexingPressure + /** Indices stats about size, document count, indexing and deletion times, search times, field cache size, merges and flushes. */ indices?: IndicesStatsShardStats } export interface NodesThreadCount { + /** Number of active threads in the thread pool. */ active?: long + /** Number of tasks completed by the thread pool executor. */ completed?: long + /** Highest number of active threads in the thread pool. */ largest?: long + /** Number of tasks in queue for the thread pool. */ queue?: long + /** Number of tasks rejected by the thread pool executor. */ rejected?: long + /** Number of threads in the thread pool. */ threads?: long } @@ -20064,26 +29495,42 @@ export interface NodesTimeHttpHistogram { } export interface NodesTransport { + /** The distribution of the time spent handling each inbound message on a transport thread, represented as a histogram. 
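
// Illustrative sketch (not part of the generated diff): reading the node statistics
// described by the NodesStats / NodesJvm / NodesTransport interfaces above with the
// @elastic/elasticsearch client. The endpoint URL and metric list are example values.
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

const stats = await client.nodes.stats({ metric: ['jvm', 'transport'] })
for (const [nodeId, node] of Object.entries(stats.nodes)) {
  // heap_used_percent and rx_count are optional fields, hence the optional chaining
  console.log(nodeId, node.jvm?.mem?.heap_used_percent, node.transport?.rx_count)
}
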
*/ inbound_handling_time_histogram?: NodesTransportHistogram[] + /** The distribution of the time spent sending each outbound transport message on a transport thread, represented as a histogram. */ outbound_handling_time_histogram?: NodesTransportHistogram[] + /** Total number of RX (receive) packets received by the node during internal cluster communication. */ rx_count?: long + /** Size of RX packets received by the node during internal cluster communication. */ rx_size?: string + /** Size, in bytes, of RX packets received by the node during internal cluster communication. */ rx_size_in_bytes?: long + /** Current number of inbound TCP connections used for internal communication between nodes. */ server_open?: integer + /** Total number of TX (transmit) packets sent by the node during internal cluster communication. */ tx_count?: long + /** Size of TX packets sent by the node during internal cluster communication. */ tx_size?: string + /** Size, in bytes, of TX packets sent by the node during internal cluster communication. */ tx_size_in_bytes?: long + /** The cumulative number of outbound transport connections that this node has opened since it started. + * Each transport connection may comprise multiple TCP connections but is only counted once in this statistic. + * Transport connections are typically long-lived so this statistic should remain constant in a stable cluster. */ total_outbound_connections?: long } export interface NodesTransportHistogram { + /** The number of times a transport thread took a period of time within the bounds of this bucket to handle an inbound message. */ count?: long + /** The exclusive upper bound of the bucket in milliseconds. + * May be omitted on the last bucket if this bucket has no upper bound. */ lt_millis?: long + /** The inclusive lower bound of the bucket in milliseconds. May be omitted on the first bucket if this bucket has no lower bound. */ ge_millis?: long } export interface NodesClearRepositoriesMeteringArchiveRequest extends RequestBase { -/** Comma-separated list of node IDs or names used to limit returned information. */ + /** Comma-separated list of node IDs or names used to limit returned information. */ node_id: NodeIds /** Specifies the maximum `archive_version` to be cleared from the archive. */ max_archive_version: long @@ -20096,12 +29543,14 @@ export interface NodesClearRepositoriesMeteringArchiveRequest extends RequestBas export type NodesClearRepositoriesMeteringArchiveResponse = NodesClearRepositoriesMeteringArchiveResponseBase export interface NodesClearRepositoriesMeteringArchiveResponseBase extends NodesNodesResponseBase { + /** Name of the cluster. Based on the `cluster.name` setting. */ cluster_name: Name + /** Contains repositories metering information for the nodes selected by the request. */ nodes: Record } export interface NodesGetRepositoriesMeteringInfoRequest extends RequestBase { -/** Comma-separated list of node IDs or names used to limit returned information. All the nodes selective options are explained [here](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster.html#cluster-nodes). */ + /** Comma-separated list of node IDs or names used to limit returned information. */ node_id: NodeIds /** All values in `body` will be added to the request body. 
*/ body?: string | { [key: string]: any } & { node_id?: never } @@ -20112,14 +29561,17 @@ export interface NodesGetRepositoriesMeteringInfoRequest extends RequestBase { export type NodesGetRepositoriesMeteringInfoResponse = NodesGetRepositoriesMeteringInfoResponseBase export interface NodesGetRepositoriesMeteringInfoResponseBase extends NodesNodesResponseBase { + /** Name of the cluster. Based on the `cluster.name` setting. */ cluster_name: Name + /** Contains repositories metering information for the nodes selected by the request. */ nodes: Record } export interface NodesHotThreadsRequest extends RequestBase { -/** List of node IDs or names used to limit returned information. */ + /** List of node IDs or names used to limit returned information. */ node_id?: NodeIds - /** If true, known idle threads (e.g. waiting in a socket select, or to get a task from an empty queue) are filtered out. */ + /** If true, known idle threads (e.g. waiting in a socket select, or to get + * a task from an empty queue) are filtered out. */ ignore_idle_threads?: boolean /** The interval to do the second sampling of threads. */ interval?: Duration @@ -20127,7 +29579,8 @@ export interface NodesHotThreadsRequest extends RequestBase { snapshots?: long /** Specifies the number of hot threads to provide information for. */ threads?: long - /** Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ + /** Period to wait for a response. If no response is received + * before the timeout expires, the request fails and returns an error. */ timeout?: Duration /** The type to sample. */ type?: ThreadType @@ -20149,12 +29602,16 @@ export interface NodesInfoDeprecationIndexing { export interface NodesInfoNodeInfo { attributes: Record build_flavor: string + /** Short hash of the last git commit in this release. */ build_hash: string build_type: string + /** The node’s host name. */ host: Host http?: NodesInfoNodeInfoHttp + /** The node’s IP address. */ ip: Ip jvm?: NodesInfoNodeJvmInfo + /** The node's name */ name: Name network?: NodesInfoNodeInfoNetwork os?: NodesInfoNodeOperatingSystemInfo @@ -20163,10 +29620,14 @@ export interface NodesInfoNodeInfo { roles: NodeRoles settings?: NodesInfoNodeInfoSettings thread_pool?: Record + /** Total heap allowed to be used to hold recently indexed documents before they must be written to disk. This size is a shared pool across all shards on this node, and is controlled by Indexing Buffer settings. */ total_indexing_buffer?: long + /** Same as total_indexing_buffer, but expressed in bytes. */ total_indexing_buffer_in_bytes?: ByteSize transport?: NodesInfoNodeInfoTransport + /** Host and port where transport HTTP connections are accepted. */ transport_address: TransportAddress + /** Elasticsearch version running on this node. */ version: VersionString modules?: PluginStats[] ingest?: NodesInfoNodeInfoIngest @@ -20457,18 +29918,25 @@ export interface NodesInfoNodeJvmInfo { vm_vendor: string vm_version: VersionString using_bundled_jdk: boolean + /** @alias using_bundled_jdk */ bundled_jdk: boolean using_compressed_ordinary_object_pointers?: boolean | string input_arguments: string[] } export interface NodesInfoNodeOperatingSystemInfo { + /** Name of the JVM architecture (ex: amd64, x86) */ arch: string + /** Number of processors available to the Java virtual machine */ available_processors: integer + /** The number of processors actually used to calculate thread pool size. 
This number can be set with the node.processors setting of a node and defaults to the number of processors reported by the OS. */ allocated_processors?: integer + /** Name of the operating system (ex: Linux, Windows, Mac OS X) */ name: Name pretty_name: Name + /** Refresh interval for the OS statistics */ refresh_interval_in_millis: DurationValue + /** Version of the operating system */ version: VersionString cpu?: NodesInfoNodeInfoOSCPU mem?: NodesInfoNodeInfoMemory @@ -20476,8 +29944,11 @@ export interface NodesInfoNodeOperatingSystemInfo { } export interface NodesInfoNodeProcessInfo { + /** Process identifier (PID) */ id: long + /** Indicates if the process address space has been successfully locked in memory */ mlockall: boolean + /** Refresh interval for the process statistics */ refresh_interval_in_millis: DurationValue } @@ -20491,7 +29962,7 @@ export interface NodesInfoNodeThreadPoolInfo { } export interface NodesInfoRequest extends RequestBase { -/** Comma-separated list of node IDs or names used to limit returned information. */ + /** Comma-separated list of node IDs or names used to limit returned information. */ node_id?: NodeIds /** Limits the information returned to the specific metrics. Supports a comma-separated list, such as http,ingest. */ metric?: Metrics @@ -20513,9 +29984,10 @@ export interface NodesInfoResponseBase extends NodesNodesResponseBase { } export interface NodesReloadSecureSettingsRequest extends RequestBase { -/** The names of particular nodes in the cluster to target. */ + /** The names of particular nodes in the cluster to target. */ node_id?: NodeIds - /** Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ + /** Period to wait for a response. + * If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration /** The password for the Elasticsearch keystore. */ secure_settings_password?: Password @@ -20533,7 +30005,7 @@ export interface NodesReloadSecureSettingsResponseBase extends NodesNodesRespons } export interface NodesStatsRequest extends RequestBase { -/** Comma-separated list of node IDs or names used to limit returned information. */ + /** Comma-separated list of node IDs or names used to limit returned information. */ node_id?: NodeIds /** Limit the information returned to the specified metrics */ metric?: Metrics @@ -20578,11 +30050,13 @@ export interface NodesUsageNodeUsage { } export interface NodesUsageRequest extends RequestBase { -/** A comma-separated list of node IDs or names to limit the returned information; use `_local` to return information from the node you're connecting to, leave empty to get information from all nodes */ + /** A comma-separated list of node IDs or names to limit the returned information; use `_local` to return information from the node you're connecting to, leave empty to get information from all nodes */ node_id?: NodeIds - /** Limits the information returned to the specific metrics. A comma-separated list of the following options: `_all`, `rest_actions`. */ + /** Limits the information returned to the specific metrics. + * A comma-separated list of the following options: `_all`, `rest_actions`. */ metric?: Metrics - /** Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ + /** Period to wait for a response. + * If no response is received before the timeout expires, the request fails and returns an error. 
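
// Illustrative sketch (not part of the generated diff): limiting a nodes.info call to
// specific metrics, per the NodesInfoRequest shape above. The endpoint is an example;
// `_local` targets only the node the client is connected to.
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

const info = await client.nodes.info({ node_id: '_local', metric: ['os', 'process'] })
for (const [nodeId, node] of Object.entries(info.nodes)) {
  console.log(nodeId, node.os?.available_processors, node.process?.mlockall)
}
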
*/ timeout?: Duration /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { node_id?: never, metric?: never, timeout?: never } @@ -20598,21 +30072,56 @@ export interface NodesUsageResponseBase extends NodesNodesResponseBase { } export interface QueryRulesQueryRule { + /** A unique identifier for the rule. */ rule_id: Id + /** The type of rule. + * `pinned` will identify and pin specific documents to the top of search results. + * `exclude` will exclude specific documents from search results. */ type: QueryRulesQueryRuleType + /** The criteria that must be met for the rule to be applied. + * If multiple criteria are specified for a rule, all criteria must be met for the rule to be applied. */ criteria: QueryRulesQueryRuleCriteria | QueryRulesQueryRuleCriteria[] + /** The actions to take when the rule is matched. + * The format of this action depends on the rule type. */ actions: QueryRulesQueryRuleActions priority?: integer } export interface QueryRulesQueryRuleActions { + /** The unique document IDs of the documents to apply the rule to. + * Only one of `ids` or `docs` may be specified and at least one must be specified. */ ids?: Id[] + /** The documents to apply the rule to. + * Only one of `ids` or `docs` may be specified and at least one must be specified. + * There is a maximum value of 100 documents in a rule. + * You can specify the following attributes for each document: + * + * * `_index`: The index of the document to pin. + * * `_id`: The unique document ID. */ docs?: QueryDslPinnedDoc[] } export interface QueryRulesQueryRuleCriteria { + /** The type of criteria. The following criteria types are supported: + * + * * `always`: Matches all queries, regardless of input. + * * `contains`: Matches that contain this value anywhere in the field meet the criteria defined by the rule. Only applicable for string values. + * * `exact`: Only exact matches meet the criteria defined by the rule. Applicable for string or numerical values. + * * `fuzzy`: Exact matches or matches within the allowed Levenshtein Edit Distance meet the criteria defined by the rule. Only applicable for string values. + * * `gt`: Matches with a value greater than this value meet the criteria defined by the rule. Only applicable for numerical values. + * * `gte`: Matches with a value greater than or equal to this value meet the criteria defined by the rule. Only applicable for numerical values. + * * `lt`: Matches with a value less than this value meet the criteria defined by the rule. Only applicable for numerical values. + * * `lte`: Matches with a value less than or equal to this value meet the criteria defined by the rule. Only applicable for numerical values. + * * `prefix`: Matches that start with this value meet the criteria defined by the rule. Only applicable for string values. + * * `suffix`: Matches that end with this value meet the criteria defined by the rule. Only applicable for string values. */ type: QueryRulesQueryRuleCriteriaType + /** The metadata field to match against. + * This metadata will be used to match against `match_criteria` sent in the rule. + * It is required for all criteria types except `always`. */ metadata?: string + /** The values to match against the `metadata` field. + * Only one value must match for the criteria to be met. + * It is required for all criteria types except `always`. 
*/ values?: any[] } @@ -20621,12 +30130,14 @@ export type QueryRulesQueryRuleCriteriaType = 'global' | 'exact' | 'exact_fuzzy' export type QueryRulesQueryRuleType = 'pinned' | 'exclude' export interface QueryRulesQueryRuleset { + /** A unique identifier for the ruleset. */ ruleset_id: Id + /** Rules associated with the query ruleset. */ rules: QueryRulesQueryRule[] } export interface QueryRulesDeleteRuleRequest extends RequestBase { -/** The unique identifier of the query ruleset containing the rule to delete */ + /** The unique identifier of the query ruleset containing the rule to delete */ ruleset_id: Id /** The unique identifier of the query rule within the specified ruleset to delete */ rule_id: Id @@ -20639,7 +30150,7 @@ export interface QueryRulesDeleteRuleRequest extends RequestBase { export type QueryRulesDeleteRuleResponse = AcknowledgedResponseBase export interface QueryRulesDeleteRulesetRequest extends RequestBase { -/** The unique identifier of the query ruleset to delete */ + /** The unique identifier of the query ruleset to delete */ ruleset_id: Id /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { ruleset_id?: never } @@ -20650,7 +30161,7 @@ export interface QueryRulesDeleteRulesetRequest extends RequestBase { export type QueryRulesDeleteRulesetResponse = AcknowledgedResponseBase export interface QueryRulesGetRuleRequest extends RequestBase { -/** The unique identifier of the query ruleset containing the rule to retrieve */ + /** The unique identifier of the query ruleset containing the rule to retrieve */ ruleset_id: Id /** The unique identifier of the query rule within the specified ruleset to retrieve */ rule_id: Id @@ -20663,7 +30174,7 @@ export interface QueryRulesGetRuleRequest extends RequestBase { export type QueryRulesGetRuleResponse = QueryRulesQueryRule export interface QueryRulesGetRulesetRequest extends RequestBase { -/** The unique identifier of the query ruleset */ + /** The unique identifier of the query ruleset */ ruleset_id: Id /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { ruleset_id?: never } @@ -20674,14 +30185,20 @@ export interface QueryRulesGetRulesetRequest extends RequestBase { export type QueryRulesGetRulesetResponse = QueryRulesQueryRuleset export interface QueryRulesListRulesetsQueryRulesetListItem { + /** A unique identifier for the ruleset. */ ruleset_id: Id + /** The number of rules associated with the ruleset. */ rule_total_count: integer + /** A map of criteria type (for example, `exact`) to the number of rules of that type. + * + * NOTE: The counts in `rule_criteria_types_counts` may be larger than the value of `rule_total_count` because a rule may have multiple criteria. */ rule_criteria_types_counts: Record + /** A map of rule type (for example, `pinned`) to the number of rules of that type. */ rule_type_counts: Record } export interface QueryRulesListRulesetsRequest extends RequestBase { -/** The offset from the first result to fetch. */ + /** The offset from the first result to fetch. */ from?: integer /** The maximum number of results to retrieve. */ size?: integer @@ -20697,15 +30214,17 @@ export interface QueryRulesListRulesetsResponse { } export interface QueryRulesPutRuleRequest extends RequestBase { -/** The unique identifier of the query ruleset containing the rule to be created or updated. */ + /** The unique identifier of the query ruleset containing the rule to be created or updated. 
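
// Illustrative sketch (not part of the generated diff): creating a query ruleset with a
// single pinned rule and then testing it, mirroring the QueryRulesQueryRule and
// QueryRulesPutRulesetRequest shapes above. The ruleset id, the `user_query` metadata key
// and the document id are invented examples, and this assumes the queryRules helpers are
// available on the client.
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

await client.queryRules.putRuleset({
  ruleset_id: 'my-ruleset',
  rules: [{
    rule_id: 'rule-1',
    type: 'pinned',
    // 'exact' criteria: the rule fires when match_criteria.user_query equals 'pugs'
    criteria: [{ type: 'exact', metadata: 'user_query', values: ['pugs'] }],
    actions: { ids: ['id1'] }
  }]
})

// The test endpoint reports which rules would match a given set of criteria.
const matched = await client.queryRules.test({
  ruleset_id: 'my-ruleset',
  match_criteria: { user_query: 'pugs' }
})
console.log(matched)
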
*/ ruleset_id: Id /** The unique identifier of the query rule within the specified ruleset to be created or updated. */ rule_id: Id /** The type of rule. */ type: QueryRulesQueryRuleType - /** The criteria that must be met for the rule to be applied. If multiple criteria are specified for a rule, all criteria must be met for the rule to be applied. */ + /** The criteria that must be met for the rule to be applied. + * If multiple criteria are specified for a rule, all criteria must be met for the rule to be applied. */ criteria: QueryRulesQueryRuleCriteria | QueryRulesQueryRuleCriteria[] - /** The actions to take when the rule is matched. The format of this action depends on the rule type. */ + /** The actions to take when the rule is matched. + * The format of this action depends on the rule type. */ actions: QueryRulesQueryRuleActions priority?: integer /** All values in `body` will be added to the request body. */ @@ -20719,7 +30238,7 @@ export interface QueryRulesPutRuleResponse { } export interface QueryRulesPutRulesetRequest extends RequestBase { -/** The unique identifier of the query ruleset to be created or updated. */ + /** The unique identifier of the query ruleset to be created or updated. */ ruleset_id: Id rules: QueryRulesQueryRule | QueryRulesQueryRule[] /** All values in `body` will be added to the request body. */ @@ -20733,14 +30252,17 @@ export interface QueryRulesPutRulesetResponse { } export interface QueryRulesTestQueryRulesetMatchedRule { + /** Ruleset unique identifier */ ruleset_id: Id + /** Rule unique identifier within that ruleset */ rule_id: Id } export interface QueryRulesTestRequest extends RequestBase { -/** The unique identifier of the query ruleset to be created or updated */ + /** The unique identifier of the query ruleset to be created or updated */ ruleset_id: Id - /** The match criteria to apply to rules in the given query ruleset. Match criteria should match the keys defined in the `criteria.metadata` field of the rule. */ + /** The match criteria to apply to rules in the given query ruleset. + * Match criteria should match the keys defined in the `criteria.metadata` field of the rule. */ match_criteria: Record /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { ruleset_id?: never, match_criteria?: never } @@ -20754,39 +30276,67 @@ export interface QueryRulesTestResponse { } export interface RollupDateHistogramGrouping { + /** How long to wait before rolling up new documents. + * By default, the indexer attempts to roll up all data that is available. + * However, it is not uncommon for data to arrive out of order. + * The indexer is unable to deal with data that arrives after a time-span has been rolled up. + * You need to specify a delay that matches the longest period of time you expect out-of-order data to arrive. */ delay?: Duration + /** The date field that is to be rolled up. */ field: Field format?: string interval?: Duration + /** The interval of time buckets to be generated when rolling up. */ calendar_interval?: Duration + /** The interval of time buckets to be generated when rolling up. */ fixed_interval?: Duration + /** Defines what `time_zone` the rollup documents are stored as. + * Unlike raw data, which can shift timezones on the fly, rolled documents have to be stored with a specific timezone. + * By default, rollup documents are stored in `UTC`. */ time_zone?: TimeZone } export interface RollupFieldMetric { + /** The field to collect metrics for. 
This must be a numeric of some kind. */ field: Field + /** An array of metrics to collect for the field. At least one metric must be configured. */ metrics: RollupMetric[] } export interface RollupGroupings { + /** A date histogram group aggregates a date field into time-based buckets. + * This group is mandatory; you currently cannot roll up documents without a timestamp and a `date_histogram` group. */ date_histogram?: RollupDateHistogramGrouping + /** The histogram group aggregates one or more numeric fields into numeric histogram intervals. */ histogram?: RollupHistogramGrouping + /** The terms group can be used on keyword or numeric fields to allow bucketing via the terms aggregation at a later point. + * The indexer enumerates and stores all values of a field for each time-period. + * This can be potentially costly for high-cardinality groups such as IP addresses, especially if the time-bucket is particularly sparse. */ terms?: RollupTermsGrouping } export interface RollupHistogramGrouping { + /** The set of fields that you wish to build histograms for. + * All fields specified must be some kind of numeric. + * Order does not matter. */ fields: Fields + /** The interval of histogram buckets to be generated when rolling up. + * For example, a value of `5` creates buckets that are five units wide (`0-5`, `5-10`, etc). + * Note that only one interval can be specified in the histogram group, meaning that all fields being grouped via the histogram must share the same interval. */ interval: long } export type RollupMetric = 'min' | 'max' | 'sum' | 'avg' | 'value_count' export interface RollupTermsGrouping { + /** The set of fields that you wish to collect terms for. + * This array can contain fields that are both keyword and numerics. + * Order does not matter. */ fields: Fields } export interface RollupDeleteJobRequest extends RequestBase { -/** Identifier for the job. */ + /** Identifier for the job. */ id: Id /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { id?: never } @@ -20802,7 +30352,8 @@ export interface RollupDeleteJobResponse { export type RollupGetJobsIndexingJobState = 'started' | 'indexing' | 'stopping' | 'stopped' | 'aborting' export interface RollupGetJobsRequest extends RequestBase { -/** Identifier for the rollup job. If it is `_all` or omitted, the API returns all rollup jobs. */ + /** Identifier for the rollup job. + * If it is `_all` or omitted, the API returns all rollup jobs. */ id?: Id /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { id?: never } @@ -20815,8 +30366,13 @@ export interface RollupGetJobsResponse { } export interface RollupGetJobsRollupJob { + /** The rollup job configuration. */ config: RollupGetJobsRollupJobConfiguration + /** Transient statistics about the rollup job, such as how many documents have been processed and how many rollup summary docs have been indexed. + * These stats are not persisted. + * If a node is restarted, these stats are reset. */ stats: RollupGetJobsRollupJobStats + /** The current status of the indexer for the rollup job. */ status: RollupGetJobsRollupJobStatus } @@ -20853,7 +30409,8 @@ export interface RollupGetJobsRollupJobStatus { } export interface RollupGetRollupCapsRequest extends RequestBase { -/** Index, indices or index-pattern to return rollup capabilities for. `_all` may be used to fetch rollup capabilities from all jobs. */ + /** Index, indices or index-pattern to return rollup capabilities for. 
+ * `_all` may be used to fetch rollup capabilities from all jobs. */ id?: Id /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { id?: never } @@ -20864,6 +30421,7 @@ export interface RollupGetRollupCapsRequest extends RequestBase { export type RollupGetRollupCapsResponse = Record export interface RollupGetRollupCapsRollupCapabilities { + /** There can be multiple, independent jobs configured for a single index or index pattern. Each of these jobs may have different configurations, so the API returns a list of all the various configurations available. */ rollup_jobs: RollupGetRollupCapsRollupCapabilitySummary[] } @@ -20885,7 +30443,8 @@ export interface RollupGetRollupIndexCapsIndexCapabilities { } export interface RollupGetRollupIndexCapsRequest extends RequestBase { -/** Data stream or index to check for rollup capabilities. Wildcard (`*`) expressions are supported. */ + /** Data stream or index to check for rollup capabilities. + * Wildcard (`*`) expressions are supported. */ index: Ids /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { index?: never } @@ -20909,17 +30468,34 @@ export interface RollupGetRollupIndexCapsRollupJobSummaryField { } export interface RollupPutJobRequest extends RequestBase { -/** Identifier for the rollup job. This can be any alphanumeric string and uniquely identifies the data that is associated with the rollup job. The ID is persistent; it is stored with the rolled up data. If you create a job, let it run for a while, then delete the job, the data that the job rolled up is still be associated with this job ID. You cannot create a new job with the same ID since that could lead to problems with mismatched job configurations. */ + /** Identifier for the rollup job. This can be any alphanumeric string and uniquely identifies the + * data that is associated with the rollup job. The ID is persistent; it is stored with the rolled + * up data. If you create a job, let it run for a while, then delete the job, the data that the job + * rolled up is still be associated with this job ID. You cannot create a new job with the same ID + * since that could lead to problems with mismatched job configurations. */ id: Id - /** A cron string which defines the intervals when the rollup job should be executed. When the interval triggers, the indexer attempts to rollup the data in the index pattern. The cron pattern is unrelated to the time interval of the data being rolled up. For example, you may wish to create hourly rollups of your document but to only run the indexer on a daily basis at midnight, as defined by the cron. The cron pattern is defined just like a Watcher cron schedule. */ + /** A cron string which defines the intervals when the rollup job should be executed. When the interval + * triggers, the indexer attempts to rollup the data in the index pattern. The cron pattern is unrelated + * to the time interval of the data being rolled up. For example, you may wish to create hourly rollups + * of your document but to only run the indexer on a daily basis at midnight, as defined by the cron. The + * cron pattern is defined just like a Watcher cron schedule. */ cron: string - /** Defines the grouping fields and aggregations that are defined for this rollup job. These fields will then be available later for aggregating into buckets. These aggs and fields can be used in any combination. 
Think of the groups configuration as defining a set of tools that can later be used in aggregations to partition the data. Unlike raw data, we have to think ahead to which fields and aggregations might be used. Rollups provide enough flexibility that you simply need to determine which fields are needed, not in what order they are needed. */ + /** Defines the grouping fields and aggregations that are defined for this rollup job. These fields will then be + * available later for aggregating into buckets. These aggs and fields can be used in any combination. Think of + * the groups configuration as defining a set of tools that can later be used in aggregations to partition the + * data. Unlike raw data, we have to think ahead to which fields and aggregations might be used. Rollups provide + * enough flexibility that you simply need to determine which fields are needed, not in what order they are needed. */ groups: RollupGroupings - /** The index or index pattern to roll up. Supports wildcard-style patterns (`logstash-*`). The job attempts to rollup the entire index or index-pattern. */ + /** The index or index pattern to roll up. Supports wildcard-style patterns (`logstash-*`). The job attempts to + * rollup the entire index or index-pattern. */ index_pattern: string - /** Defines the metrics to collect for each grouping tuple. By default, only the doc_counts are collected for each group. To make rollup useful, you will often add metrics like averages, mins, maxes, etc. Metrics are defined on a per-field basis and for each field you configure which metric should be collected. */ + /** Defines the metrics to collect for each grouping tuple. By default, only the doc_counts are collected for each + * group. To make rollup useful, you will often add metrics like averages, mins, maxes, etc. Metrics are defined + * on a per-field basis and for each field you configure which metric should be collected. */ metrics?: RollupFieldMetric[] - /** The number of bucket results that are processed on each iteration of the rollup indexer. A larger value tends to execute faster, but requires more memory during processing. This value has no effect on how the data is rolled up; it is merely used for tweaking the speed or memory cost of the indexer. */ + /** The number of bucket results that are processed on each iteration of the rollup indexer. A larger value tends + * to execute faster, but requires more memory during processing. This value has no effect on how the data is + * rolled up; it is merely used for tweaking the speed or memory cost of the indexer. */ page_size: integer /** The index that contains the rollup results. The index can be shared with other rollup jobs. The data is stored so that it doesn’t interfere with unrelated jobs. */ rollup_index: IndexName @@ -20935,7 +30511,13 @@ export interface RollupPutJobRequest extends RequestBase { export type RollupPutJobResponse = AcknowledgedResponseBase export interface RollupRollupSearchRequest extends RequestBase { -/** A comma-separated list of data streams and indices used to limit the request. This parameter has the following rules: * At least one data stream, index, or wildcard expression must be specified. This target can include a rollup or non-rollup index. For data streams, the stream's backing indices can only serve as non-rollup indices. Omitting the parameter or using `_all` are not permitted. * Multiple non-rollup indices may be specified. * Only one rollup index may be specified. If more than one are supplied, an exception occurs. 
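
// Illustrative sketch (not part of the generated diff): creating a rollup job from the
// RollupGroupings / RollupFieldMetric / RollupPutJobRequest shapes above. The index names,
// cron schedule and field names are invented examples.
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

await client.rollup.putJob({
  id: 'sensor-rollup',
  index_pattern: 'sensor-*',        // source indices to roll up
  rollup_index: 'sensor_rollup',    // destination index for the rollup summaries
  cron: '0 0 * * * ?',              // when the indexer runs; unrelated to the bucket interval
  page_size: 1000,
  groups: {
    date_histogram: { field: 'timestamp', fixed_interval: '1h' },
    terms: { fields: ['node'] }
  },
  metrics: [{ field: 'temperature', metrics: ['min', 'max', 'avg'] }]
})
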
* Wildcard expressions (`*`) may be used. If they match more than one rollup index, an exception occurs. However, you can use an expression to match multiple non-rollup indices or data streams. */ + /** A comma-separated list of data streams and indices used to limit the request. + * This parameter has the following rules: + * + * * At least one data stream, index, or wildcard expression must be specified. This target can include a rollup or non-rollup index. For data streams, the stream's backing indices can only serve as non-rollup indices. Omitting the parameter or using `_all` are not permitted. + * * Multiple non-rollup indices may be specified. + * * Only one rollup index may be specified. If more than one are supplied, an exception occurs. + * * Wildcard expressions (`*`) may be used. If they match more than one rollup index, an exception occurs. However, you can use an expression to match multiple non-rollup indices or data streams. */ index: Indices /** Indicates whether hits.total should be rendered as an integer or an object in the rest search response */ rest_total_hits_as_int?: boolean @@ -20943,8 +30525,8 @@ export interface RollupRollupSearchRequest extends RequestBase { typed_keys?: boolean /** Specifies aggregations. */ aggregations?: Record - /** @alias aggregations */ - /** Specifies aggregations. */ + /** Specifies aggregations. + * @alias aggregations */ aggs?: Record /** Specifies a DSL query that is subject to some limitations. */ query?: QueryDslQueryContainer @@ -20966,7 +30548,7 @@ export interface RollupRollupSearchResponse } export interface SearchApplicationSearchApplicationParameters { + /** Indices that are part of the Search Application. */ indices: IndexName[] + /** Analytics collection associated to the Search Application. */ analytics_collection_name?: Name + /** Search template to use on search operations. */ template?: SearchApplicationSearchApplicationTemplate } export interface SearchApplicationSearchApplicationTemplate { - script: Script | string + /** The associated mustache template. */ + script: Script | ScriptSource } export interface SearchApplicationDeleteRequest extends RequestBase { -/** The name of the search application to delete. */ + /** The name of the search application to delete. */ name: Name /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { name?: never } @@ -21032,7 +30625,7 @@ export interface SearchApplicationDeleteRequest extends RequestBase { export type SearchApplicationDeleteResponse = AcknowledgedResponseBase export interface SearchApplicationDeleteBehavioralAnalyticsRequest extends RequestBase { -/** The name of the analytics collection to be deleted */ + /** The name of the analytics collection to be deleted */ name: Name /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { name?: never } @@ -21043,7 +30636,7 @@ export interface SearchApplicationDeleteBehavioralAnalyticsRequest extends Reque export type SearchApplicationDeleteBehavioralAnalyticsResponse = AcknowledgedResponseBase export interface SearchApplicationGetRequest extends RequestBase { -/** The name of the search application */ + /** The name of the search application */ name: Name /** All values in `body` will be added to the request body. 
*/ body?: string | { [key: string]: any } & { name?: never } @@ -21054,7 +30647,7 @@ export interface SearchApplicationGetRequest extends RequestBase { export type SearchApplicationGetResponse = SearchApplicationSearchApplication export interface SearchApplicationGetBehavioralAnalyticsRequest extends RequestBase { -/** A list of analytics collections to limit the returned information */ + /** A list of analytics collections to limit the returned information */ name?: Name[] /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { name?: never } @@ -21065,7 +30658,7 @@ export interface SearchApplicationGetBehavioralAnalyticsRequestB export type SearchApplicationGetBehavioralAnalyticsResponse = Record export interface SearchApplicationListRequest extends RequestBase { -/** Query in the Lucene query string syntax. */ + /** Query in the Lucene query string syntax. */ q?: string /** Starting offset. */ from?: integer @@ -21083,7 +30676,7 @@ export interface SearchApplicationListResponse { } export interface SearchApplicationPostBehavioralAnalyticsEventRequest extends RequestBase { -/** The name of the behavioral analytics collection. */ + /** The name of the behavioral analytics collection. */ collection_name: Name /** The analytics event type. */ event_type: SearchApplicationEventType @@ -21102,7 +30695,7 @@ export interface SearchApplicationPostBehavioralAnalyticsEventResponse { } export interface SearchApplicationPutRequest extends RequestBase { -/** The name of the search application to be created or updated. */ + /** The name of the search application to be created or updated. */ name: Name /** If `true`, this request cannot replace or update existing Search Applications. */ create?: boolean @@ -21118,11 +30711,12 @@ export interface SearchApplicationPutResponse { } export interface SearchApplicationPutBehavioralAnalyticsAnalyticsAcknowledgeResponseBase extends AcknowledgedResponseBase { + /** The name of the analytics collection created or updated */ name: Name } export interface SearchApplicationPutBehavioralAnalyticsRequest extends RequestBase { -/** The name of the analytics collection to be created or updated. */ + /** The name of the analytics collection to be created or updated. */ name: Name /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { name?: never } @@ -21133,7 +30727,7 @@ export interface SearchApplicationPutBehavioralAnalyticsRequestB export type SearchApplicationPutBehavioralAnalyticsResponse = SearchApplicationPutBehavioralAnalyticsAnalyticsAcknowledgeResponseBase export interface SearchApplicationRenderQueryRequest extends RequestBase { -/** The name of the search application to render teh query for. */ + /** The name of the search application to render the query for. */ name: Name params?: Record /** All values in `body` will be added to the request body. */ @@ -21146,7 +30740,7 @@ export interface SearchApplicationRenderQueryResponse { } export interface SearchApplicationSearchRequest extends RequestBase { -/** The name of the search application to be searched. */ + /** The name of the search application to be searched. */ name: Name /** Determines whether aggregation names are prefixed by their respective types in the response.
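
// Illustrative sketch (not part of the generated diff): querying a search application by
// name with template parameters, per the SearchApplicationSearchRequest shape above. The
// application name and the `query_string` parameter are examples and depend on the
// application's configured search template.
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

const result = await client.searchApplication.search({
  name: 'my-search-app',
  params: { query_string: 'wireless headphones' }
})
console.log(result.hits.hits)
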
*/ typed_keys?: boolean @@ -21167,7 +30761,7 @@ export interface SearchableSnapshotsCacheStatsNode { } export interface SearchableSnapshotsCacheStatsRequest extends RequestBase { -/** The names of the nodes in the cluster to target. */ + /** The names of the nodes in the cluster to target. */ node_id?: NodeIds master_timeout?: Duration /** All values in `body` will be added to the request body. */ @@ -21192,7 +30786,8 @@ export interface SearchableSnapshotsCacheStatsShared { } export interface SearchableSnapshotsClearCacheRequest extends RequestBase { -/** A comma-separated list of data streams, indices, and aliases to clear from the cache. It supports wildcards (`*`). */ + /** A comma-separated list of data streams, indices, and aliases to clear from the cache. + * It supports wildcards (`*`). */ index?: Indices /** Whether to expand wildcard expression to concrete indices that are open, closed or both. */ expand_wildcards?: ExpandWildcards @@ -21215,17 +30810,20 @@ export interface SearchableSnapshotsMountMountedSnapshot { } export interface SearchableSnapshotsMountRequest extends RequestBase { -/** The name of the repository containing the snapshot of the index to mount. */ + /** The name of the repository containing the snapshot of the index to mount. */ repository: Name /** The name of the snapshot of the index to mount. */ snapshot: Name - /** The period to wait for the master node. If the master node is not available before the timeout expires, the request fails and returns an error. To indicate that the request should never timeout, set it to `-1`. */ + /** The period to wait for the master node. + * If the master node is not available before the timeout expires, the request fails and returns an error. + * To indicate that the request should never timeout, set it to `-1`. */ master_timeout?: Duration /** If true, the request blocks until the operation is complete. */ wait_for_completion?: boolean /** The mount option for the searchable snapshot index. */ storage?: string - /** The name of the index contained in the snapshot whose data is to be mounted. If no `renamed_index` is specified, this name will also be used to create the new index. */ + /** The name of the index contained in the snapshot whose data is to be mounted. + * If no `renamed_index` is specified, this name will also be used to create the new index. */ index: IndexName /** The name of the index that will be created. */ renamed_index?: IndexName @@ -21244,7 +30842,7 @@ export interface SearchableSnapshotsMountResponse { } export interface SearchableSnapshotsStatsRequest extends RequestBase { -/** A comma-separated list of data streams and indices to retrieve statistics for. */ + /** A comma-separated list of data streams and indices to retrieve statistics for. */ index?: Indices /** Return stats aggregated at cluster, index or shard level */ level?: SearchableSnapshotsStatsLevel @@ -21260,26 +30858,51 @@ export interface SearchableSnapshotsStatsResponse { } export interface SecurityAccess { + /** A list of indices permission entries for cross-cluster replication. */ replication?: SecurityReplicationAccess[] + /** A list of indices permission entries for cross-cluster search. */ search?: SecuritySearchAccess[] } export interface SecurityApiKey { + /** Id for the API key */ id: Id + /** Name of the API key. */ name: Name + /** The type of the API key (e.g. `rest` or `cross_cluster`). */ type: SecurityApiKeyType + /** Creation time for the API key in milliseconds. 
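
// Illustrative sketch (not part of the generated diff): mounting an index from a snapshot
// as a searchable snapshot, per the SearchableSnapshotsMountRequest shape above. The
// repository, snapshot and index names are invented examples.
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

const mounted = await client.searchableSnapshots.mount({
  repository: 'my-repository',
  snapshot: 'snapshot-1',
  index: 'my-index',                 // index inside the snapshot
  renamed_index: 'my-index-mounted', // name of the index created by the mount
  wait_for_completion: true
})
console.log(mounted.snapshot)
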
*/ creation: EpochTime + /** Expiration time for the API key in milliseconds. */ expiration?: EpochTime + /** Invalidation status for the API key. + * If the key has been invalidated, it has a value of `true`. Otherwise, it is `false`. */ invalidated: boolean + /** If the key has been invalidated, invalidation time in milliseconds. */ invalidation?: EpochTime + /** Principal for which this API key was created */ username: Username + /** Realm name of the principal for which this API key was created. */ realm: string + /** Realm type of the principal for which this API key was created */ realm_type?: string + /** Metadata of the API key */ metadata: Metadata + /** The role descriptors assigned to this API key when it was created or last updated. + * An empty role descriptor means the API key inherits the owner user’s permissions. */ role_descriptors?: Record + /** The owner user’s permissions associated with the API key. + * It is a point-in-time snapshot captured at creation and subsequent updates. + * An API key’s effective permissions are an intersection of its assigned privileges and the owner user’s permissions. */ limited_by?: Record[] + /** The access granted to cross-cluster API keys. + * The access is composed of permissions for cross cluster search and cross cluster replication. + * At least one of them must be specified. + * When specified, the new access assignment fully replaces the previously assigned access. */ access?: SecurityAccess + /** The profile uid for the API key owner principal, if requested and if it exists */ profile_uid?: string + /** Sorting values when using the `sort` parameter with the `security.query_api_keys` API. */ _sort?: SortResults } @@ -21290,13 +30913,18 @@ export interface SecurityApplicationGlobalUserPrivileges { } export interface SecurityApplicationPrivileges { + /** The name of the application to which this entry applies. */ application: string + /** A list of strings, where each element is the name of an application privilege or action. */ privileges: string[] + /** A list resources to which the privileges are applied. */ resources: string[] } export interface SecurityBulkError { + /** The number of errors */ count: integer + /** Details about the errors, keyed by role name */ details: Record } @@ -21310,12 +30938,6 @@ export interface SecurityCreatedStatus { created: boolean } -export interface SecurityFieldRule { - username?: Names - dn?: Names - groups?: Names -} - export interface SecurityFieldSecurity { except?: Fields grant?: Fields @@ -21330,10 +30952,16 @@ export type SecurityGrantType = 'password' | 'access_token' export type SecurityIndexPrivilege = 'all' | 'auto_configure' | 'create' | 'create_doc' | 'create_index' | 'cross_cluster_replication' | 'cross_cluster_replication_internal' | 'delete' | 'delete_index' | 'index' | 'maintenance' | 'manage' | 'manage_data_stream_lifecycle' | 'manage_follow_index' | 'manage_ilm' | 'manage_leader_index' | 'monitor' | 'none' | 'read' | 'read_cross_cluster' | 'view_index_metadata' | 'write' | string export interface SecurityIndicesPrivileges { + /** The document fields that the owners of the role have read access to. */ field_security?: SecurityFieldSecurity + /** A list of indices (or index name patterns) to which the permissions in this entry apply. */ names: IndexName | IndexName[] + /** The index level privileges that owners of the role have on the specified indices. */ privileges: SecurityIndexPrivilege[] + /** A search query that defines the documents the owners of the role have access to. 
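
// Illustrative sketch (not part of the generated diff): creating an API key whose stored
// shape is described by the SecurityApiKey / SecurityRoleDescriptor interfaces above. The
// key name, expiration and index pattern are invented examples.
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

const key = await client.security.createApiKey({
  name: 'logs-read-only',
  expiration: '7d',
  role_descriptors: {
    logs_read: {
      indices: [{ names: ['logs-*'], privileges: ['read'] }]
    }
  }
})
// `id` identifies the key; `api_key` is the secret, returned only at creation time
console.log(key.id, key.api_key)
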
A document within the specified indices must match this query for it to be accessible by the owners of the role. */ query?: SecurityIndicesPrivilegesQuery + /** Set to `true` if using wildcard or regular expressions for patterns that cover restricted indices. Implicitly, restricted indices have limited privileges that can cause pattern tests to fail. If restricted indices are explicitly included in the `names` list, Elasticsearch checks privileges against these indices regardless of the value set for `allow_restricted_indices`. + * @remarks This property is not supported on Elastic Cloud Serverless. */ allow_restricted_indices?: boolean } @@ -21351,56 +30979,117 @@ export interface SecurityRealmInfo { export type SecurityRemoteClusterPrivilege = 'monitor_enrich' | 'monitor_stats' export interface SecurityRemoteClusterPrivileges { + /** A list of cluster aliases to which the permissions in this entry apply. */ clusters: Names + /** The cluster level privileges that owners of the role have on the remote cluster. */ privileges: SecurityRemoteClusterPrivilege[] } export interface SecurityRemoteIndicesPrivileges { + /** A list of cluster aliases to which the permissions in this entry apply. */ clusters: Names + /** The document fields that the owners of the role have read access to. */ field_security?: SecurityFieldSecurity + /** A list of indices (or index name patterns) to which the permissions in this entry apply. */ names: IndexName | IndexName[] + /** The index level privileges that owners of the role have on the specified indices. */ privileges: SecurityIndexPrivilege[] + /** A search query that defines the documents the owners of the role have access to. A document within the specified indices must match this query for it to be accessible by the owners of the role. */ query?: SecurityIndicesPrivilegesQuery + /** Set to `true` if using wildcard or regular expressions for patterns that cover restricted indices. Implicitly, restricted indices have limited privileges that can cause pattern tests to fail. If restricted indices are explicitly included in the `names` list, Elasticsearch checks privileges against these indices regardless of the value set for `allow_restricted_indices`. + * @remarks This property is not supported on Elastic Cloud Serverless. */ allow_restricted_indices?: boolean } +export interface SecurityRemoteUserIndicesPrivileges { + /** The document fields that the owners of the role have read access to. */ + field_security?: SecurityFieldSecurity[] + /** A list of indices (or index name patterns) to which the permissions in this entry apply. */ + names: IndexName | IndexName[] + /** The index level privileges that owners of the role have on the specified indices. */ + privileges: SecurityIndexPrivilege[] + /** Search queries that define the documents the user has access to. A document within the specified indices must match these queries for it to be accessible by the owners of the role. */ + query?: SecurityIndicesPrivilegesQuery[] + /** Set to `true` if using wildcard or regular expressions for patterns that cover restricted indices. Implicitly, restricted indices have limited privileges that can cause pattern tests to fail. If restricted indices are explicitly included in the `names` list, Elasticsearch checks privileges against these indices regardless of the value set for `allow_restricted_indices`. 
*/ + allow_restricted_indices: boolean + clusters: string[] +} + export interface SecurityReplicationAccess { + /** A list of indices (or index name patterns) to which the permissions in this entry apply. */ names: IndexName | IndexName[] + /** This needs to be set to true if the patterns in the names field should cover system indices. */ allow_restricted_indices?: boolean } export interface SecurityRestriction { + /** A list of workflows to which the API key is restricted. + * NOTE: In order to use a role restriction, an API key must be created with a single role descriptor. */ workflows: SecurityRestrictionWorkflow[] } export type SecurityRestrictionWorkflow = 'search_application_query' | string export interface SecurityRoleDescriptor { + /** A list of cluster privileges. These privileges define the cluster level actions that API keys are able to execute. */ cluster?: SecurityClusterPrivilege[] + /** A list of indices permissions entries. */ indices?: SecurityIndicesPrivileges[] + /** A list of indices permissions entries. + * @alias indices */ index?: SecurityIndicesPrivileges[] + /** A list of indices permissions for remote clusters. + * @remarks This property is not supported on Elastic Cloud Serverless. */ remote_indices?: SecurityRemoteIndicesPrivileges[] + /** A list of cluster permissions for remote clusters. + * NOTE: This is limited a subset of the cluster permissions. + * @remarks This property is not supported on Elastic Cloud Serverless. */ remote_cluster?: SecurityRemoteClusterPrivileges[] + /** An object defining global privileges. A global privilege is a form of cluster privilege that is request-aware. Support for global privileges is currently limited to the management of application privileges. + * @remarks This property is not supported on Elastic Cloud Serverless. */ global?: SecurityGlobalPrivilege[] | SecurityGlobalPrivilege + /** A list of application privilege entries */ applications?: SecurityApplicationPrivileges[] + /** Optional meta-data. Within the metadata object, keys that begin with `_` are reserved for system usage. */ metadata?: Metadata + /** A list of users that the API keys can impersonate. + * NOTE: In Elastic Cloud Serverless, the run-as feature is disabled. + * For API compatibility, you can still specify an empty `run_as` field, but a non-empty list will be rejected. */ run_as?: string[] + /** Optional description of the role descriptor */ description?: string + /** Restriction for when the role descriptor is allowed to be effective. */ restriction?: SecurityRestriction transient_metadata?: Record } export interface SecurityRoleDescriptorRead { + /** A list of cluster privileges. These privileges define the cluster level actions that API keys are able to execute. */ cluster: SecurityClusterPrivilege[] + /** A list of indices permissions entries. */ indices: SecurityIndicesPrivileges[] + /** A list of indices permissions entries. + * @alias indices */ index: SecurityIndicesPrivileges[] + /** A list of indices permissions for remote clusters. + * @remarks This property is not supported on Elastic Cloud Serverless. */ remote_indices?: SecurityRemoteIndicesPrivileges[] + /** A list of cluster permissions for remote clusters. + * NOTE: This is limited a subset of the cluster permissions. + * @remarks This property is not supported on Elastic Cloud Serverless. */ remote_cluster?: SecurityRemoteClusterPrivileges[] + /** An object defining global privileges. A global privilege is a form of cluster privilege that is request-aware. 
Support for global privileges is currently limited to the management of application privileges. + * @remarks This property is not supported on Elastic Cloud Serverless. */ global?: SecurityGlobalPrivilege[] | SecurityGlobalPrivilege + /** A list of application privilege entries */ applications?: SecurityApplicationPrivileges[] + /** Optional meta-data. Within the metadata object, keys that begin with `_` are reserved for system usage. */ metadata?: Metadata + /** A list of users that the API keys can impersonate. */ run_as?: string[] + /** An optional description of the role descriptor. */ description?: string + /** A restriction for when the role descriptor is allowed to be effective. */ restriction?: SecurityRestriction transient_metadata?: Record } @@ -21416,33 +31105,46 @@ export interface SecurityRoleMapping { export interface SecurityRoleMappingRule { any?: SecurityRoleMappingRule[] all?: SecurityRoleMappingRule[] - field?: SecurityFieldRule + field?: Partial> except?: SecurityRoleMappingRule } export interface SecurityRoleTemplate { format?: SecurityTemplateFormat - template: Script | string + template: Script | ScriptSource } export type SecurityRoleTemplateInlineQuery = string | QueryDslQueryContainer export interface SecurityRoleTemplateQuery { + /** When you create a role, you can specify a query that defines the document level security permissions. You can optionally + * use Mustache templates in the role query to insert the username of the current authenticated user into the role. + * Like other places in Elasticsearch that support templating or scripting, you can specify inline, stored, or file-based + * templates and define custom parameters. You access the details for the current authenticated user through the _user parameter. */ template?: SecurityRoleTemplateScript | SecurityRoleTemplateInlineQuery } export interface SecurityRoleTemplateScript { source?: SecurityRoleTemplateInlineQuery + /** The `id` for a stored script. */ id?: Id + /** Specifies any named parameters that are passed into the script as variables. + * Use parameters instead of hard-coded values to decrease compile time. */ params?: Record + /** Specifies the language the script is written in. */ lang?: ScriptLanguage options?: Record } export interface SecuritySearchAccess { + /** The document fields that the owners of the role have read access to. */ field_security?: SecurityFieldSecurity + /** A list of indices (or index name patterns) to which the permissions in this entry apply. */ names: IndexName | IndexName[] + /** A search query that defines the documents the owners of the role have access to. A document within the specified indices must match this query for it to be accessible by the owners of the role. */ query?: SecurityIndicesPrivilegesQuery + /** Set to `true` if using wildcard or regular expressions for patterns that cover restricted indices. Implicitly, restricted indices have limited privileges that can cause pattern tests to fail. If restricted indices are explicitly included in the `names` list, Elasticsearch checks privileges against these indices regardless of the value set for `allow_restricted_indices`. + * @remarks This property is not supported on Elastic Cloud Serverless. */ allow_restricted_indices?: boolean } @@ -21463,10 +31165,15 @@ export interface SecurityUser { } export interface SecurityUserIndicesPrivileges { + /** The document fields that the owners of the role have read access to. 
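
For context on how the role-descriptor and index-privilege typings documented above are consumed, here is a minimal, hedged sketch. It assumes the client exposes a `security.putRole` helper; the node URL, credentials, role name, index patterns, query, and field names are illustrative and not taken from this changeset.

import { Client } from '@elastic/elasticsearch'

// Illustrative client setup; endpoint and credentials are placeholders.
const client = new Client({ node: 'https://localhost:9200/', auth: { apiKey: '<api-key>' } })

// A role built from the SecurityIndicesPrivileges shape shown above:
// document-level security via `query`, field-level security via `field_security`.
await client.security.putRole({
  name: 'logs-reader',
  cluster: ['monitor'],
  indices: [
    {
      names: ['logs-*'],
      privileges: ['read', 'view_index_metadata'],
      query: { term: { 'event.dataset': 'nginx.access' } },
      field_security: { grant: ['@timestamp', 'message'] }
    }
  ]
})
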
*/ field_security?: SecurityFieldSecurity[] + /** A list of indices (or index name patterns) to which the permissions in this entry apply. */ names: IndexName | IndexName[] + /** The index level privileges that owners of the role have on the specified indices. */ privileges: SecurityIndexPrivilege[] + /** Search queries that define the documents the user has access to. A document within the specified indices must match these queries for it to be accessible by the owners of the role. */ query?: SecurityIndicesPrivilegesQuery[] + /** Set to `true` if using wildcard or regular expressions for patterns that cover restricted indices. Implicitly, restricted indices have limited privileges that can cause pattern tests to fail. If restricted indices are explicitly included in the `names` list, Elasticsearch checks privileges against these indices regardless of the value set for `allow_restricted_indices`. */ allow_restricted_indices: boolean } @@ -21500,13 +31207,20 @@ export interface SecurityUserProfileWithMetadata extends SecurityUserProfile { } export interface SecurityActivateUserProfileRequest extends RequestBase { -/** The user's Elasticsearch access token or JWT. Both `access` and `id` JWT token types are supported and they depend on the underlying JWT realm configuration. If you specify the `access_token` grant type, this parameter is required. It is not valid with other grant types. */ + /** The user's Elasticsearch access token or JWT. + * Both `access` and `id` JWT token types are supported and they depend on the underlying JWT realm configuration. + * If you specify the `access_token` grant type, this parameter is required. + * It is not valid with other grant types. */ access_token?: string /** The type of grant. */ grant_type: SecurityGrantType - /** The user's password. If you specify the `password` grant type, this parameter is required. It is not valid with other grant types. */ + /** The user's password. + * If you specify the `password` grant type, this parameter is required. + * It is not valid with other grant types. */ password?: string - /** The username that identifies the user. If you specify the `password` grant type, this parameter is required. It is not valid with other grant types. */ + /** The username that identifies the user. + * If you specify the `password` grant type, this parameter is required. + * It is not valid with other grant types. */ username?: string /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { access_token?: never, grant_type?: never, password?: never, username?: never } @@ -21548,7 +31262,7 @@ export interface SecurityAuthenticateToken { } export interface SecurityBulkDeleteRoleRequest extends RequestBase { -/** If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. */ + /** If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. 
*/ refresh?: Refresh /** An array of role names to delete */ names: string[] @@ -21559,13 +31273,16 @@ export interface SecurityBulkDeleteRoleRequest extends RequestBase { } export interface SecurityBulkDeleteRoleResponse { + /** Array of deleted roles */ deleted?: string[] + /** Array of roles that could not be found */ not_found?: string[] + /** Present if any deletes resulted in errors */ errors?: SecurityBulkError } export interface SecurityBulkPutRoleRequest extends RequestBase { -/** If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. */ + /** If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. */ refresh?: Refresh /** A dictionary of role name to RoleDescriptor objects to add or update */ roles: Record @@ -21576,20 +31293,34 @@ export interface SecurityBulkPutRoleRequest extends RequestBase { } export interface SecurityBulkPutRoleResponse { + /** Array of created roles */ created?: string[] + /** Array of updated roles */ updated?: string[] + /** Array of role names without any changes */ noop?: string[] + /** Present if any updates resulted in errors */ errors?: SecurityBulkError } export interface SecurityBulkUpdateApiKeysRequest extends RequestBase { -/** Expiration time for the API keys. By default, API keys never expire. This property can be omitted to leave the value unchanged. */ + /** Expiration time for the API keys. + * By default, API keys never expire. + * This property can be omitted to leave the value unchanged. */ expiration?: Duration /** The API key identifiers. */ ids: string | string[] - /** Arbitrary nested metadata to associate with the API keys. Within the `metadata` object, top-level keys beginning with an underscore (`_`) are reserved for system usage. Any information specified with this parameter fully replaces metadata previously associated with the API key. */ + /** Arbitrary nested metadata to associate with the API keys. + * Within the `metadata` object, top-level keys beginning with an underscore (`_`) are reserved for system usage. + * Any information specified with this parameter fully replaces metadata previously associated with the API key. */ metadata?: Metadata - /** The role descriptors to assign to the API keys. An API key's effective permissions are an intersection of its assigned privileges and the point-in-time snapshot of permissions of the owner user. You can assign new privileges by specifying them in this parameter. To remove assigned privileges, supply the `role_descriptors` parameter as an empty object `{}`. If an API key has no assigned privileges, it inherits the owner user's full permissions. The snapshot of the owner's permissions is always updated, whether you supply the `role_descriptors` parameter. The structure of a role descriptor is the same as the request for the create API keys API. */ + /** The role descriptors to assign to the API keys. + * An API key's effective permissions are an intersection of its assigned privileges and the point-in-time snapshot of permissions of the owner user. + * You can assign new privileges by specifying them in this parameter. + * To remove assigned privileges, supply the `role_descriptors` parameter as an empty object `{}`. 
+ * If an API key has no assigned privileges, it inherits the owner user's full permissions. + * The snapshot of the owner's permissions is always updated, whether or not you supply the `role_descriptors` parameter. + * The structure of a role descriptor is the same as the request for the create API keys API. */ role_descriptors?: Record /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { expiration?: never, ids?: never, metadata?: never, role_descriptors?: never } @@ -21604,13 +31335,17 @@ export interface SecurityBulkUpdateApiKeysResponse { } export interface SecurityChangePasswordRequest extends RequestBase { -/** The user whose password you want to change. If you do not specify this parameter, the password is changed for the current user. */ + /** The user whose password you want to change. If you do not specify this + * parameter, the password is changed for the current user. */ username?: Username /** If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. */ refresh?: Refresh /** The new password value. Passwords must be at least 6 characters long. */ password?: Password - /** A hash of the new password value. This must be produced using the same hashing algorithm as has been configured for password storage. For more details, see the explanation of the `xpack.security.authc.password_hashing.algorithm` setting. */ + /** A hash of the new password value. This must be produced using the same + * hashing algorithm as has been configured for password storage. For more details, + * see the explanation of the `xpack.security.authc.password_hashing.algorithm` + * setting. */ password_hash?: string /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { username?: never, refresh?: never, password?: never, password_hash?: never } @@ -21622,7 +31357,9 @@ export interface SecurityChangePasswordResponse { } export interface SecurityClearApiKeyCacheRequest extends RequestBase { -/** Comma-separated list of API key IDs to evict from the API key cache. To evict all API keys, use `*`. Does not support other wildcard patterns. */ + /** Comma-separated list of API key IDs to evict from the API key cache. + * To evict all API keys, use `*`. + * Does not support other wildcard patterns. */ ids: Ids /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { ids?: never } @@ -21637,7 +31374,9 @@ export interface SecurityClearApiKeyCacheResponse { } export interface SecurityClearCachedPrivilegesRequest extends RequestBase { -/** A comma-separated list of applications. To clear all applications, use an asterisk (`*`). It does not support other wildcard patterns. */ + /** A comma-separated list of applications. + * To clear all applications, use an asterisk (`*`). + * It does not support other wildcard patterns. */ application: Name /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { application?: never } @@ -21652,9 +31391,12 @@ export interface SecurityClearCachedPrivilegesResponse { } export interface SecurityClearCachedRealmsRequest extends RequestBase { -/** A comma-separated list of realms.
+ * To clear all realms, use an asterisk (`*`). + * It does not support other wildcard patterns. */ realms: Names - /** A comma-separated list of the users to clear from the cache. If you do not specify this parameter, the API evicts all users from the user cache. */ + /** A comma-separated list of the users to clear from the cache. + * If you do not specify this parameter, the API evicts all users from the user cache. */ usernames?: string[] /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { realms?: never, usernames?: never } @@ -21669,7 +31411,9 @@ export interface SecurityClearCachedRealmsResponse { } export interface SecurityClearCachedRolesRequest extends RequestBase { -/** A comma-separated list of roles to evict from the role cache. To evict all roles, use an asterisk (`*`). It does not support other wildcard patterns. */ + /** A comma-separated list of roles to evict from the role cache. + * To evict all roles, use an asterisk (`*`). + * It does not support other wildcard patterns. */ name: Names /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { name?: never } @@ -21684,11 +31428,13 @@ export interface SecurityClearCachedRolesResponse { } export interface SecurityClearCachedServiceTokensRequest extends RequestBase { -/** The namespace, which is a top-level grouping of service accounts. */ + /** The namespace, which is a top-level grouping of service accounts. */ namespace: Namespace /** The name of the service, which must be unique within its namespace. */ service: Service - /** A comma-separated list of token names to evict from the service account token caches. Use a wildcard (`*`) to evict all tokens that belong to a service account. It does not support other wildcard patterns. */ + /** A comma-separated list of token names to evict from the service account token caches. + * Use a wildcard (`*`) to evict all tokens that belong to a service account. + * It does not support other wildcard patterns. */ name: Names /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { namespace?: never, service?: never, name?: never } @@ -21703,13 +31449,22 @@ export interface SecurityClearCachedServiceTokensResponse { } export interface SecurityCreateApiKeyRequest extends RequestBase { -/** If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. */ + /** If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. */ refresh?: Refresh - /** The expiration time for the API key. By default, API keys never expire. */ + /** The expiration time for the API key. + * By default, API keys never expire. */ expiration?: Duration /** A name for the API key. */ name?: Name - /** An array of role descriptors for this API key. When it is not specified or it is an empty array, the API key will have a point in time snapshot of permissions of the authenticated user. If you supply role descriptors, the resultant permissions are an intersection of API keys permissions and the authenticated user's permissions thereby limiting the access scope for API keys. 
The structure of role descriptor is the same as the request for the create role API. For more details, refer to the create or update roles API. NOTE: Due to the way in which this permission intersection is calculated, it is not possible to create an API key that is a child of another API key, unless the derived key is created without any privileges. In this case, you must explicitly specify a role descriptor with no privileges. The derived API key can be used for authentication; it will not have authority to call Elasticsearch APIs. */ + /** An array of role descriptors for this API key. + * When it is not specified or it is an empty array, the API key will have a point in time snapshot of permissions of the authenticated user. + * If you supply role descriptors, the resultant permissions are an intersection of API keys permissions and the authenticated user's permissions thereby limiting the access scope for API keys. + * The structure of role descriptor is the same as the request for the create role API. + * For more details, refer to the create or update roles API. + * + * NOTE: Due to the way in which this permission intersection is calculated, it is not possible to create an API key that is a child of another API key, unless the derived key is created without any privileges. + * In this case, you must explicitly specify a role descriptor with no privileges. + * The derived API key can be used for authentication; it will not have authority to call Elasticsearch APIs. */ role_descriptors?: Record /** Arbitrary metadata that you want to associate with the API key. It supports nested data structure. Within the metadata object, keys beginning with `_` are reserved for system usage. */ metadata?: Metadata @@ -21720,19 +31475,34 @@ export interface SecurityCreateApiKeyRequest extends RequestBase { } export interface SecurityCreateApiKeyResponse { + /** Generated API key. */ api_key: string + /** Expiration in milliseconds for the API key. */ expiration?: long + /** Unique ID for this API key. */ id: Id + /** Specifies the name for this API key. */ name: Name + /** API key credentials which is the base64-encoding of + * the UTF-8 representation of `id` and `api_key` joined + * by a colon (`:`). */ encoded: string } export interface SecurityCreateCrossClusterApiKeyRequest extends RequestBase { -/** The access to be granted to this API key. The access is composed of permissions for cross-cluster search and cross-cluster replication. At least one of them must be specified. NOTE: No explicit privileges should be specified for either search or replication access. The creation process automatically converts the access specification to a role descriptor which has relevant privileges assigned accordingly. */ + /** The access to be granted to this API key. + * The access is composed of permissions for cross-cluster search and cross-cluster replication. + * At least one of them must be specified. + * + * NOTE: No explicit privileges should be specified for either search or replication access. + * The creation process automatically converts the access specification to a role descriptor which has relevant privileges assigned accordingly. */ access: SecurityAccess - /** Expiration time for the API key. By default, API keys never expire. */ + /** Expiration time for the API key. + * By default, API keys never expire. */ expiration?: Duration - /** Arbitrary metadata that you want to associate with the API key. It supports nested data structure. 
Within the metadata object, keys beginning with `_` are reserved for system usage. */ + /** Arbitrary metadata that you want to associate with the API key. + * It supports nested data structure. + * Within the metadata object, keys beginning with `_` are reserved for system usage. */ metadata?: Metadata /** Specifies the name for this API key. */ name: Name @@ -21743,19 +31513,33 @@ export interface SecurityCreateCrossClusterApiKeyRequest extends RequestBase { } export interface SecurityCreateCrossClusterApiKeyResponse { + /** Generated API key. */ api_key: string + /** Expiration in milliseconds for the API key. */ expiration?: DurationValue + /** Unique ID for this API key. */ id: Id + /** Specifies the name for this API key. */ name: Name + /** API key credentials which is the base64-encoding of + * the UTF-8 representation of `id` and `api_key` joined + * by a colon (`:`). */ encoded: string } export interface SecurityCreateServiceTokenRequest extends RequestBase { -/** The name of the namespace, which is a top-level grouping of service accounts. */ + /** The name of the namespace, which is a top-level grouping of service accounts. */ namespace: Namespace /** The name of the service. */ service: Service - /** The name for the service account token. If omitted, a random name will be generated. Token names must be at least one and no more than 256 characters. They can contain alphanumeric characters (a-z, A-Z, 0-9), dashes (`-`), and underscores (`_`), but cannot begin with an underscore. NOTE: Token names must be unique in the context of the associated service account. They must also be globally unique with their fully qualified names, which are comprised of the service account principal and token name, such as `//`. */ + /** The name for the service account token. + * If omitted, a random name will be generated. + * + * Token names must be at least one and no more than 256 characters. + * They can contain alphanumeric characters (a-z, A-Z, 0-9), dashes (`-`), and underscores (`_`), but cannot begin with an underscore. + * + * NOTE: Token names must be unique in the context of the associated service account. + * They must also be globally unique with their fully qualified names, which are comprised of the service account principal and token name, such as `//`. */ name?: Name /** If `true` then refresh the affected shards to make this operation visible to search, if `wait_for` (the default) then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. */ refresh?: Refresh @@ -21796,7 +31580,11 @@ export interface SecurityDelegatePkiAuthenticationRealm { } export interface SecurityDelegatePkiRequest extends RequestBase { -/** The X509Certificate chain, which is represented as an ordered string array. Each string in the array is a base64-encoded (Section 4 of RFC4648 - not base64url-encoded) of the certificate's DER encoding. The first element is the target certificate that contains the subject distinguished name that is requesting access. This may be followed by additional certificates; each subsequent certificate is used to certify the previous one. */ + /** The X509Certificate chain, which is represented as an ordered string array. + * Each string in the array is a base64-encoded (Section 4 of RFC4648 - not base64url-encoded) of the certificate's DER encoding. + * + * The first element is the target certificate that contains the subject distinguished name that is requesting access. 
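
To illustrate the create-API-key request and response typings above, a hedged usage sketch follows. It assumes the client exposes a `security.createApiKey` helper; the key name, expiration, role descriptor contents, and metadata are illustrative.

import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'https://localhost:9200/', auth: { apiKey: '<api-key>' } })

// The key's effective permissions are the intersection of the role
// descriptors supplied here and the creating user's own permissions.
const created = await client.security.createApiKey({
  name: 'ingest-only-key',
  expiration: '7d', // omit to create a non-expiring key
  role_descriptors: {
    'logs-ingest': {
      cluster: ['monitor'],
      indices: [{ names: ['logs-*'], privileges: ['create_doc', 'auto_configure'] }]
    }
  },
  metadata: { team: 'platform' } // keys beginning with `_` are reserved
})

// `encoded` is the base64 encoding of "<id>:<api_key>" and can be sent
// as an `Authorization: ApiKey ...` header.
console.log(created.encoded)
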
+ * This may be followed by additional certificates; each subsequent certificate is used to certify the previous one. */ x509_certificate_chain: string[] /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { x509_certificate_chain?: never } @@ -21805,8 +31593,11 @@ export interface SecurityDelegatePkiRequest extends RequestBase { } export interface SecurityDelegatePkiResponse { + /** An access token associated with the subject distinguished name of the client's certificate. */ access_token: string + /** The amount of time (in seconds) before the token expires. */ expires_in: long + /** The type of token. */ type: string authentication?: SecurityDelegatePkiAuthentication } @@ -21816,7 +31607,8 @@ export interface SecurityDeletePrivilegesFoundStatus { } export interface SecurityDeletePrivilegesRequest extends RequestBase { -/** The name of the application. Application privileges are always associated with exactly one application. */ + /** The name of the application. + * Application privileges are always associated with exactly one application. */ application: Name /** The name of the privilege. */ name: Names @@ -21831,7 +31623,7 @@ export interface SecurityDeletePrivilegesRequest extends RequestBase { export type SecurityDeletePrivilegesResponse = Record> export interface SecurityDeleteRoleRequest extends RequestBase { -/** The name of the role. */ + /** The name of the role. */ name: Name /** If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. */ refresh?: Refresh @@ -21842,11 +31634,14 @@ export interface SecurityDeleteRoleRequest extends RequestBase { } export interface SecurityDeleteRoleResponse { + /** If the role is successfully deleted, `found` is `true`. + * Otherwise, `found` is `false`. */ found: boolean } export interface SecurityDeleteRoleMappingRequest extends RequestBase { -/** The distinct name that identifies the role mapping. The name is used solely as an identifier to facilitate interaction via the API; it does not affect the behavior of the mapping in any way. */ + /** The distinct name that identifies the role mapping. + * The name is used solely as an identifier to facilitate interaction via the API; it does not affect the behavior of the mapping in any way. */ name: Name /** If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. */ refresh?: Refresh @@ -21857,11 +31652,13 @@ export interface SecurityDeleteRoleMappingRequest extends RequestBase { } export interface SecurityDeleteRoleMappingResponse { + /** If the mapping is successfully deleted, `found` is `true`. + * Otherwise, `found` is `false`. */ found: boolean } export interface SecurityDeleteServiceTokenRequest extends RequestBase { -/** The namespace, which is a top-level grouping of service accounts. */ + /** The namespace, which is a top-level grouping of service accounts. */ namespace: Namespace /** The service name. */ service: Service @@ -21876,11 +31673,13 @@ export interface SecurityDeleteServiceTokenRequest extends RequestBase { } export interface SecurityDeleteServiceTokenResponse { + /** If the service account token is successfully deleted, the request returns `{"found": true}`. 
+ * Otherwise, the response will have status code 404 and `found` is set to `false`. */ found: boolean } export interface SecurityDeleteUserRequest extends RequestBase { -/** An identifier for the user. */ + /** An identifier for the user. */ username: Username /** If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. */ refresh?: Refresh @@ -21891,11 +31690,13 @@ export interface SecurityDeleteUserRequest extends RequestBase { } export interface SecurityDeleteUserResponse { + /** If the user is successfully deleted, the request returns `{"found": true}`. + * Otherwise, `found` is set to `false`. */ found: boolean } export interface SecurityDisableUserRequest extends RequestBase { -/** An identifier for the user. */ + /** An identifier for the user. */ username: Username /** If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. */ refresh?: Refresh @@ -21909,9 +31710,11 @@ export interface SecurityDisableUserResponse { } export interface SecurityDisableUserProfileRequest extends RequestBase { -/** Unique identifier for the user profile. */ + /** Unique identifier for the user profile. */ uid: SecurityUserProfileId - /** If 'true', Elasticsearch refreshes the affected shards to make this operation visible to search. If 'wait_for', it waits for a refresh to make this operation visible to search. If 'false', it does nothing with refreshes. */ + /** If 'true', Elasticsearch refreshes the affected shards to make this operation visible to search. + * If 'wait_for', it waits for a refresh to make this operation visible to search. + * If 'false', it does nothing with refreshes. */ refresh?: Refresh /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { uid?: never, refresh?: never } @@ -21922,7 +31725,7 @@ export interface SecurityDisableUserProfileRequest extends RequestBase { export type SecurityDisableUserProfileResponse = AcknowledgedResponseBase export interface SecurityEnableUserRequest extends RequestBase { -/** An identifier for the user. */ + /** An identifier for the user. */ username: Username /** If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. */ refresh?: Refresh @@ -21936,9 +31739,12 @@ export interface SecurityEnableUserResponse { } export interface SecurityEnableUserProfileRequest extends RequestBase { -/** A unique identifier for the user profile. */ + /** A unique identifier for the user profile. */ uid: SecurityUserProfileId - /** If 'true', Elasticsearch refreshes the affected shards to make this operation visible to search. If 'wait_for', it waits for a refresh to make this operation visible to search. If 'false', nothing is done with refreshes. */ + /** If 'true', Elasticsearch refreshes the affected shards to make this operation + * visible to search. + * If 'wait_for', it waits for a refresh to make this operation visible to search. + * If 'false', nothing is done with refreshes. */ refresh?: Refresh /** All values in `body` will be added to the request body. 
*/ body?: string | { [key: string]: any } & { uid?: never, refresh?: never } @@ -21957,11 +31763,16 @@ export interface SecurityEnrollKibanaRequest extends RequestBase { export interface SecurityEnrollKibanaResponse { token: SecurityEnrollKibanaToken + /** The CA certificate used to sign the node certificates that Elasticsearch uses for TLS on the HTTP layer. + * The certificate is returned as a Base64 encoded string of the ASN.1 DER encoding of the certificate. */ http_ca: string } export interface SecurityEnrollKibanaToken { + /** The name of the bearer token for the `elastic/kibana` service account. */ name: string + /** The value of the bearer token for the `elastic/kibana` service account. + * Use this value to authenticate the service account with Elasticsearch. */ value: string } @@ -21973,26 +31784,41 @@ export interface SecurityEnrollNodeRequest extends RequestBase { } export interface SecurityEnrollNodeResponse { + /** The CA private key that can be used by the new node in order to sign its certificate for the HTTP layer, as a Base64 encoded string of the ASN.1 DER encoding of the key. */ http_ca_key: string + /** The CA certificate that can be used by the new node in order to sign its certificate for the HTTP layer, as a Base64 encoded string of the ASN.1 DER encoding of the certificate. */ http_ca_cert: string + /** The CA certificate that is used to sign the TLS certificate for the transport layer, as a Base64 encoded string of the ASN.1 DER encoding of the certificate. */ transport_ca_cert: string + /** The private key that the node can use for TLS for its transport layer, as a Base64 encoded string of the ASN.1 DER encoding of the key. */ transport_key: string + /** The certificate that the node can use for TLS for its transport layer, as a Base64 encoded string of the ASN.1 DER encoding of the certificate. */ transport_cert: string + /** A list of transport addresses in the form of `host:port` for the nodes that are already members of the cluster. */ nodes_addresses: string[] } export interface SecurityGetApiKeyRequest extends RequestBase { -/** An API key id. This parameter cannot be used with any of `name`, `realm_name` or `username`. */ + /** An API key id. + * This parameter cannot be used with any of `name`, `realm_name` or `username`. */ id?: Id - /** An API key name. This parameter cannot be used with any of `id`, `realm_name` or `username`. It supports prefix search with wildcard. */ + /** An API key name. + * This parameter cannot be used with any of `id`, `realm_name` or `username`. + * It supports prefix search with wildcard. */ name?: Name - /** A boolean flag that can be used to query API keys owned by the currently authenticated user. The `realm_name` or `username` parameters cannot be specified when this parameter is set to `true` as they are assumed to be the currently authenticated ones. */ + /** A boolean flag that can be used to query API keys owned by the currently authenticated user. + * The `realm_name` or `username` parameters cannot be specified when this parameter is set to `true` as they are assumed to be the currently authenticated ones. */ owner?: boolean - /** The name of an authentication realm. This parameter cannot be used with either `id` or `name` or when `owner` flag is set to `true`. */ + /** The name of an authentication realm. + * This parameter cannot be used with either `id` or `name` or when `owner` flag is set to `true`. */ realm_name?: Name - /** The username of a user. 
This parameter cannot be used with either `id` or `name` or when `owner` flag is set to `true`. */ + /** The username of a user. + * This parameter cannot be used with either `id` or `name` or when `owner` flag is set to `true`. */ username?: Username - /** Return the snapshot of the owner user's role descriptors associated with the API key. An API key's actual permission is the intersection of its assigned role descriptors and the owner user's role descriptors. */ + /** Return the snapshot of the owner user's role descriptors + * associated with the API key. An API key's actual + * permission is the intersection of its assigned role + * descriptors and the owner user's role descriptors. */ with_limited_by?: boolean /** A boolean flag that can be used to query API keys that are currently active. An API key is considered active if it is neither invalidated, nor expired at query time. You can specify this together with other parameters such as `owner` or `name`. If `active_only` is false, the response will include both active and inactive (expired or invalidated) keys. */ active_only?: boolean @@ -22016,15 +31842,22 @@ export interface SecurityGetBuiltinPrivilegesRequest extends RequestBase { } export interface SecurityGetBuiltinPrivilegesResponse { + /** The list of cluster privileges that are understood by this version of Elasticsearch. */ cluster: SecurityClusterPrivilege[] + /** The list of index privileges that are understood by this version of Elasticsearch. */ index: IndexName[] + /** The list of remote_cluster privileges that are understood by this version of Elasticsearch. + * @remarks This property is not supported on Elastic Cloud Serverless. */ remote_cluster: SecurityRemoteClusterPrivilege[] } export interface SecurityGetPrivilegesRequest extends RequestBase { -/** The name of the application. Application privileges are always associated with exactly one application. If you do not specify this parameter, the API returns information about all privileges for all applications. */ + /** The name of the application. + * Application privileges are always associated with exactly one application. + * If you do not specify this parameter, the API returns information about all privileges for all applications. */ application?: Name - /** The name of the privilege. If you do not specify this parameter, the API returns information about all privileges for the requested application. */ + /** The name of the privilege. + * If you do not specify this parameter, the API returns information about all privileges for the requested application. */ name?: Names /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { application?: never, name?: never } @@ -22035,7 +31868,9 @@ export interface SecurityGetPrivilegesRequest extends RequestBase { export type SecurityGetPrivilegesResponse = Record> export interface SecurityGetRoleRequest extends RequestBase { -/** The name of the role. You can specify multiple roles as a comma-separated list. If you do not specify this parameter, the API returns information about all roles. */ + /** The name of the role. + * You can specify multiple roles as a comma-separated list. + * If you do not specify this parameter, the API returns information about all roles. */ name?: Names /** All values in `body` will be added to the request body. 
*/ body?: string | { [key: string]: any } & { name?: never } @@ -22048,10 +31883,13 @@ export type SecurityGetRoleResponse = Record export interface SecurityGetRoleRole { cluster: SecurityClusterPrivilege[] indices: SecurityIndicesPrivileges[] + /** @remarks This property is not supported on Elastic Cloud Serverless. */ remote_indices?: SecurityRemoteIndicesPrivileges[] + /** @remarks This property is not supported on Elastic Cloud Serverless. */ remote_cluster?: SecurityRemoteClusterPrivileges[] metadata: Metadata - run_as: string[] + description?: string + run_as?: string[] transient_metadata?: Record applications: SecurityApplicationPrivileges[] role_templates?: SecurityRoleTemplate[] @@ -22059,7 +31897,7 @@ export interface SecurityGetRoleRole { } export interface SecurityGetRoleMappingRequest extends RequestBase { -/** The distinct name that identifies the role mapping. The name is used solely as an identifier to facilitate interaction via the API; it does not affect the behavior of the mapping in any way. You can specify multiple mapping names as a comma-separated list. If you do not specify this parameter, the API returns information about all role mappings. */ + /** The distinct name that identifies the role mapping. The name is used solely as an identifier to facilitate interaction via the API; it does not affect the behavior of the mapping in any way. You can specify multiple mapping names as a comma-separated list. If you do not specify this parameter, the API returns information about all role mappings. */ name?: Names /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { name?: never } @@ -22070,9 +31908,12 @@ export interface SecurityGetRoleMappingRequest extends RequestBase { export type SecurityGetRoleMappingResponse = Record export interface SecurityGetServiceAccountsRequest extends RequestBase { -/** The name of the namespace. Omit this parameter to retrieve information about all service accounts. If you omit this parameter, you must also omit the `service` parameter. */ + /** The name of the namespace. + * Omit this parameter to retrieve information about all service accounts. + * If you omit this parameter, you must also omit the `service` parameter. */ namespace?: Namespace - /** The service name. Omit this parameter to retrieve information about all service accounts that belong to the specified `namespace`. */ + /** The service name. + * Omit this parameter to retrieve information about all service accounts that belong to the specified `namespace`. */ service?: Service /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { namespace?: never, service?: never } @@ -22087,7 +31928,9 @@ export interface SecurityGetServiceAccountsRoleDescriptorWrapper { } export interface SecurityGetServiceCredentialsNodesCredentials { + /** General status showing how nodes respond to the above collection request */ _nodes: NodeStatistics + /** File-backed tokens collected from all nodes */ file_tokens: Record } @@ -22096,7 +31939,7 @@ export interface SecurityGetServiceCredentialsNodesCredentialsFileToken { } export interface SecurityGetServiceCredentialsRequest extends RequestBase { -/** The name of the namespace. */ + /** The name of the namespace. */ namespace: Namespace /** The service name. 
*/ service: Name @@ -22110,11 +31953,13 @@ export interface SecurityGetServiceCredentialsResponse { service_account: string count: integer tokens: Record + /** Service account credentials collected from all nodes of the cluster. */ nodes_credentials: SecurityGetServiceCredentialsNodesCredentials } export interface SecurityGetSettingsRequest extends RequestBase { -/** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ + /** Period to wait for a connection to the master node. + * If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { master_timeout?: never } @@ -22123,8 +31968,11 @@ export interface SecurityGetSettingsRequest extends RequestBase { } export interface SecurityGetSettingsResponse { + /** Settings for the index used for most security configuration, including native realm users and roles configured with the API. */ security: SecuritySecuritySettings + /** Settings for the index used to store profile information. */ 'security-profile': SecuritySecuritySettings + /** Settings for the index used to store tokens. */ 'security-tokens': SecuritySecuritySettings } @@ -22143,17 +31991,27 @@ export interface SecurityGetTokenAuthenticationProvider { } export interface SecurityGetTokenRequest extends RequestBase { -/** The type of grant. Supported grant types are: `password`, `_kerberos`, `client_credentials`, and `refresh_token`. */ + /** The type of grant. + * Supported grant types are: `password`, `_kerberos`, `client_credentials`, and `refresh_token`. */ grant_type?: SecurityGetTokenAccessTokenGrantType - /** The scope of the token. Currently tokens are only issued for a scope of FULL regardless of the value sent with the request. */ + /** The scope of the token. + * Currently tokens are only issued for a scope of FULL regardless of the value sent with the request. */ scope?: string - /** The user's password. If you specify the `password` grant type, this parameter is required. This parameter is not valid with any other supported grant type. */ + /** The user's password. + * If you specify the `password` grant type, this parameter is required. + * This parameter is not valid with any other supported grant type. */ password?: Password - /** The base64 encoded kerberos ticket. If you specify the `_kerberos` grant type, this parameter is required. This parameter is not valid with any other supported grant type. */ + /** The base64 encoded kerberos ticket. + * If you specify the `_kerberos` grant type, this parameter is required. + * This parameter is not valid with any other supported grant type. */ kerberos_ticket?: string - /** The string that was returned when you created the token, which enables you to extend its life. If you specify the `refresh_token` grant type, this parameter is required. This parameter is not valid with any other supported grant type. */ + /** The string that was returned when you created the token, which enables you to extend its life. + * If you specify the `refresh_token` grant type, this parameter is required. + * This parameter is not valid with any other supported grant type. */ refresh_token?: string - /** The username that identifies the user. If you specify the `password` grant type, this parameter is required. This parameter is not valid with any other supported grant type. 
*/ + /** The username that identifies the user. + * If you specify the `password` grant type, this parameter is required. + * This parameter is not valid with any other supported grant type. */ username?: Username /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { grant_type?: never, scope?: never, password?: never, kerberos_ticket?: never, refresh_token?: never, username?: never } @@ -22177,7 +32035,7 @@ export interface SecurityGetTokenUserRealm { } export interface SecurityGetUserRequest extends RequestBase { -/** An identifier for the user. You can specify multiple usernames as a comma-separated list. If you omit this parameter, the API retrieves information about all users. */ + /** An identifier for the user. You can specify multiple usernames as a comma-separated list. If you omit this parameter, the API retrieves information about all users. */ username?: Username | Username[] /** Determines whether to retrieve the user profile UID, if it exists, for the users. */ with_profile_uid?: boolean @@ -22190,7 +32048,7 @@ export interface SecurityGetUserRequest extends RequestBase { export type SecurityGetUserResponse = Record export interface SecurityGetUserPrivilegesRequest extends RequestBase { -/** The name of the application. Application privileges are always associated with exactly one application. If you do not specify this parameter, the API returns information about all privileges for all applications. */ + /** The name of the application. Application privileges are always associated with exactly one application. If you do not specify this parameter, the API returns information about all privileges for all applications. */ application?: Name /** The name of the privilege. If you do not specify this parameter, the API returns information about all privileges for the requested application. */ priviledge?: Name @@ -22204,8 +32062,10 @@ export interface SecurityGetUserPrivilegesRequest extends RequestBase { export interface SecurityGetUserPrivilegesResponse { applications: SecurityApplicationPrivileges[] cluster: string[] + remote_cluster?: SecurityRemoteClusterPrivileges[] global: SecurityGlobalPrivilege[] indices: SecurityUserIndicesPrivileges[] + remote_indices?: SecurityRemoteUserIndicesPrivileges[] run_as: string[] } @@ -22215,9 +32075,12 @@ export interface SecurityGetUserProfileGetUserProfileErrors { } export interface SecurityGetUserProfileRequest extends RequestBase { -/** A unique identifier for the user profile. */ + /** A unique identifier for the user profile. */ uid: SecurityUserProfileId | SecurityUserProfileId[] - /** A comma-separated list of filters for the `data` field of the profile document. To return all content use `data=*`. To return a subset of content use `data=` to retrieve content nested under the specified ``. By default returns no `data` content. */ + /** A comma-separated list of filters for the `data` field of the profile document. + * To return all content use `data=*`. + * To return a subset of content use `data=` to retrieve content nested under the specified ``. + * By default returns no `data` content. */ data?: string | string[] /** All values in `body` will be added to the request body. 
*/ body?: string | { [key: string]: any } & { uid?: never, data?: never } @@ -22226,6 +32089,9 @@ export interface SecurityGetUserProfileRequest extends RequestBase { } export interface SecurityGetUserProfileResponse { + /** A successful call returns the JSON representation of the user profile and its internal versioning numbers. + * The API returns an empty object if no profile document is found for the provided `uid`. + * The content of the data field is not returned by default to avoid deserializing a potential large payload. */ profiles: SecurityUserProfileWithMetadata[] errors?: SecurityGetUserProfileGetUserProfileErrors } @@ -22234,21 +32100,34 @@ export type SecurityGrantApiKeyApiKeyGrantType = 'access_token' | 'password' export interface SecurityGrantApiKeyGrantApiKey { name: Name + /** Expiration time for the API key. By default, API keys never expire. */ expiration?: DurationLarge + /** The role descriptors for this API key. + * When it is not specified or is an empty array, the API key has a point in time snapshot of permissions of the specified user or access token. + * If you supply role descriptors, the resultant permissions are an intersection of API keys permissions and the permissions of the user or access token. */ role_descriptors?: Record | Record[] + /** Arbitrary metadata that you want to associate with the API key. + * It supports nested data structure. + * Within the `metadata` object, keys beginning with `_` are reserved for system usage. */ metadata?: Metadata } export interface SecurityGrantApiKeyRequest extends RequestBase { -/** The API key. */ + /** The API key. */ api_key: SecurityGrantApiKeyGrantApiKey /** The type of grant. Supported grant types are: `access_token`, `password`. */ grant_type: SecurityGrantApiKeyApiKeyGrantType - /** The user's access token. If you specify the `access_token` grant type, this parameter is required. It is not valid with other grant types. */ + /** The user's access token. + * If you specify the `access_token` grant type, this parameter is required. + * It is not valid with other grant types. */ access_token?: string - /** The user name that identifies the user. If you specify the `password` grant type, this parameter is required. It is not valid with other grant types. */ + /** The user name that identifies the user. + * If you specify the `password` grant type, this parameter is required. + * It is not valid with other grant types. */ username?: Username - /** The user's password. If you specify the `password` grant type, this parameter is required. It is not valid with other grant types. */ + /** The user's password. + * If you specify the `password` grant type, this parameter is required. + * It is not valid with other grant types. */ password?: Password /** The name of the user to be impersonated. */ run_as?: Username @@ -22267,23 +32146,32 @@ export interface SecurityGrantApiKeyResponse { } export interface SecurityHasPrivilegesApplicationPrivilegesCheck { + /** The name of the application. */ application: string + /** A list of the privileges that you want to check for the specified resources. + * It may be either application privilege names or the names of actions that are granted by those privileges */ privileges: string[] + /** A list of resource names against which the privileges should be checked. */ resources: string[] } export type SecurityHasPrivilegesApplicationsPrivileges = Record export interface SecurityHasPrivilegesIndexPrivilegesCheck { + /** A list of indices. 
*/ names: Indices + /** A list of the privileges that you want to check for the specified indices. */ privileges: SecurityIndexPrivilege[] + /** This needs to be set to `true` (default is `false`) if using wildcards or regexps for patterns that cover restricted indices. + * Implicitly, restricted indices do not match index patterns because restricted indices usually have limited privileges and including them in pattern tests would render most such tests false. + * If restricted indices are explicitly included in the names list, privileges will be checked against them regardless of the value of `allow_restricted_indices`. */ allow_restricted_indices?: boolean } export type SecurityHasPrivilegesPrivileges = Record export interface SecurityHasPrivilegesRequest extends RequestBase { -/** Username */ + /** Username */ user?: Name application?: SecurityHasPrivilegesApplicationPrivilegesCheck[] /** A list of the cluster privileges that you want to check. */ @@ -22312,12 +32200,13 @@ export interface SecurityHasPrivilegesUserProfileHasPrivilegesUserProfileErrors export interface SecurityHasPrivilegesUserProfilePrivilegesCheck { application?: SecurityHasPrivilegesApplicationPrivilegesCheck[] + /** A list of the cluster privileges that you want to check. */ cluster?: SecurityClusterPrivilege[] index?: SecurityHasPrivilegesIndexPrivilegesCheck[] } export interface SecurityHasPrivilegesUserProfileRequest extends RequestBase { -/** A list of profile IDs. The privileges are checked for associated users of the profiles. */ + /** A list of profile IDs. The privileges are checked for associated users of the profiles. */ uids: SecurityUserProfileId[] /** An object containing all the privileges to be checked. */ privileges: SecurityHasPrivilegesUserProfilePrivilegesCheck @@ -22328,21 +32217,34 @@ export interface SecurityHasPrivilegesUserProfileRequest extends RequestBase { } export interface SecurityHasPrivilegesUserProfileResponse { + /** The subset of the requested profile IDs of the users that + * have all the requested privileges. */ has_privilege_uids: SecurityUserProfileId[] + /** The subset of the requested profile IDs for which an error + * was encountered. It does not include the missing profile IDs + * or the profile IDs of the users that do not have all the + * requested privileges. This field is absent if empty. */ errors?: SecurityHasPrivilegesUserProfileHasPrivilegesUserProfileErrors } export interface SecurityInvalidateApiKeyRequest extends RequestBase { id?: Id - /** A list of API key ids. This parameter cannot be used with any of `name`, `realm_name`, or `username`. */ + /** A list of API key ids. + * This parameter cannot be used with any of `name`, `realm_name`, or `username`. */ ids?: Id[] - /** An API key name. This parameter cannot be used with any of `ids`, `realm_name` or `username`. */ + /** An API key name. + * This parameter cannot be used with any of `ids`, `realm_name` or `username`. */ name?: Name - /** Query API keys owned by the currently authenticated user. The `realm_name` or `username` parameters cannot be specified when this parameter is set to `true` as they are assumed to be the currently authenticated ones. NOTE: At least one of `ids`, `name`, `username`, and `realm_name` must be specified if `owner` is `false`. */ + /** Query API keys owned by the currently authenticated user. + * The `realm_name` or `username` parameters cannot be specified when this parameter is set to `true` as they are assumed to be the currently authenticated ones. 
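
The privilege-check typings above pair with the client's `security.hasPrivileges` call. A minimal hedged sketch; the cluster and index privileges checked here are chosen for illustration only.

import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'https://localhost:9200/', auth: { apiKey: '<api-key>' } })

// Checks the calling user's privileges; the response reports a boolean per
// requested privilege plus an overall `has_all_requested` flag.
const check = await client.security.hasPrivileges({
  cluster: ['monitor'],
  index: [
    { names: ['logs-*'], privileges: ['read'], allow_restricted_indices: false }
  ]
})

console.log(check.has_all_requested)
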
+ * + * NOTE: At least one of `ids`, `name`, `username`, and `realm_name` must be specified if `owner` is `false`. */ owner?: boolean - /** The name of an authentication realm. This parameter cannot be used with either `ids` or `name`, or when `owner` flag is set to `true`. */ + /** The name of an authentication realm. + * This parameter cannot be used with either `ids` or `name`, or when `owner` flag is set to `true`. */ realm_name?: string - /** The username of a user. This parameter cannot be used with either `ids` or `name` or when `owner` flag is set to `true`. */ + /** The username of a user. + * This parameter cannot be used with either `ids` or `name` or when `owner` flag is set to `true`. */ username?: Username /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { id?: never, ids?: never, name?: never, owner?: never, realm_name?: never, username?: never } @@ -22351,20 +32253,29 @@ export interface SecurityInvalidateApiKeyRequest extends RequestBase { } export interface SecurityInvalidateApiKeyResponse { + /** The number of errors that were encountered when invalidating the API keys. */ error_count: integer + /** Details about the errors. + * This field is not present in the response when `error_count` is `0`. */ error_details?: ErrorCause[] + /** The IDs of the API keys that were invalidated as part of this request. */ invalidated_api_keys: string[] + /** The IDs of the API keys that were already invalidated. */ previously_invalidated_api_keys: string[] } export interface SecurityInvalidateTokenRequest extends RequestBase { -/** An access token. This parameter cannot be used if any of `refresh_token`, `realm_name`, or `username` are used. */ + /** An access token. + * This parameter cannot be used if any of `refresh_token`, `realm_name`, or `username` are used. */ token?: string - /** A refresh token. This parameter cannot be used if any of `refresh_token`, `realm_name`, or `username` are used. */ + /** A refresh token. + * This parameter cannot be used if any of `refresh_token`, `realm_name`, or `username` are used. */ refresh_token?: string - /** The name of an authentication realm. This parameter cannot be used with either `refresh_token` or `token`. */ + /** The name of an authentication realm. + * This parameter cannot be used with either `refresh_token` or `token`. */ realm_name?: Name - /** The username of a user. This parameter cannot be used with either `refresh_token` or `token`. */ + /** The username of a user. + * This parameter cannot be used with either `refresh_token` or `token`. */ username?: Username /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { token?: never, refresh_token?: never, realm_name?: never, username?: never } @@ -22373,20 +32284,29 @@ export interface SecurityInvalidateTokenRequest extends RequestBase { } export interface SecurityInvalidateTokenResponse { + /** The number of errors that were encountered when invalidating the tokens. */ error_count: long + /** Details about the errors. + * This field is not present in the response when `error_count` is `0`. */ error_details?: ErrorCause[] + /** The number of the tokens that were invalidated as part of this request. */ invalidated_tokens: long + /** The number of tokens that were already invalidated. */ previously_invalidated_tokens: long } export interface SecurityOidcAuthenticateRequest extends RequestBase { -/** Associate a client session with an ID token and mitigate replay attacks. 
This value needs to be the same as the one that was provided to the `/_security/oidc/prepare` API or the one that was generated by Elasticsearch and included in the response to that call. */ + /** Associate a client session with an ID token and mitigate replay attacks. + * This value needs to be the same as the one that was provided to the `/_security/oidc/prepare` API or the one that was generated by Elasticsearch and included in the response to that call. */ nonce: string - /** The name of the OpenID Connect realm. This property is useful in cases where multiple realms are defined. */ + /** The name of the OpenID Connect realm. + * This property is useful in cases where multiple realms are defined. */ realm?: string - /** The URL to which the OpenID Connect Provider redirected the User Agent in response to an authentication request after a successful authentication. This URL must be provided as-is (URL encoded), taken from the body of the response or as the value of a location header in the response from the OpenID Connect Provider. */ + /** The URL to which the OpenID Connect Provider redirected the User Agent in response to an authentication request after a successful authentication. + * This URL must be provided as-is (URL encoded), taken from the body of the response or as the value of a location header in the response from the OpenID Connect Provider. */ redirect_uri: string - /** Maintain state between the authentication request and the response. This value needs to be the same as the one that was provided to the `/_security/oidc/prepare` API or the one that was generated by Elasticsearch and included in the response to that call. */ + /** Maintain state between the authentication request and the response. + * This value needs to be the same as the one that was provided to the `/_security/oidc/prepare` API or the one that was generated by Elasticsearch and included in the response to that call. */ state: string /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { nonce?: never, realm?: never, redirect_uri?: never, state?: never } @@ -22395,37 +32315,49 @@ export interface SecurityOidcAuthenticateRequest extends RequestBase { } export interface SecurityOidcAuthenticateResponse { + /** The Elasticsearch access token. */ access_token: string + /** The duration (in seconds) of the tokens. */ expires_in: integer + /** The Elasticsearch refresh token. */ refresh_token: string + /** The type of token. */ type: string } export interface SecurityOidcLogoutRequest extends RequestBase { -/** The access token to be invalidated. */ - access_token: string + /** The access token to be invalidated. */ + token: string /** The refresh token to be invalidated. */ refresh_token?: string /** All values in `body` will be added to the request body. */ - body?: string | { [key: string]: any } & { access_token?: never, refresh_token?: never } + body?: string | { [key: string]: any } & { token?: never, refresh_token?: never } /** All values in `querystring` will be added to the request querystring. */ - querystring?: { [key: string]: any } & { access_token?: never, refresh_token?: never } + querystring?: { [key: string]: any } & { token?: never, refresh_token?: never } } export interface SecurityOidcLogoutResponse { + /** A URI that points to the end session endpoint of the OpenID Connect Provider with all the parameters of the logout request as HTTP GET parameters. 
*/ redirect: string } export interface SecurityOidcPrepareAuthenticationRequest extends RequestBase { -/** In the case of a third party initiated single sign on, this is the issuer identifier for the OP that the RP is to send the authentication request to. It cannot be specified when *realm* is specified. One of *realm* or *iss* is required. */ + /** In the case of a third party initiated single sign on, this is the issuer identifier for the OP that the RP is to send the authentication request to. + * It cannot be specified when *realm* is specified. + * One of *realm* or *iss* is required. */ iss?: string - /** In the case of a third party initiated single sign on, it is a string value that is included in the authentication request as the *login_hint* parameter. This parameter is not valid when *realm* is specified. */ + /** In the case of a third party initiated single sign on, it is a string value that is included in the authentication request as the *login_hint* parameter. + * This parameter is not valid when *realm* is specified. */ login_hint?: string - /** The value used to associate a client session with an ID token and to mitigate replay attacks. If the caller of the API does not provide a value, Elasticsearch will generate one with sufficient entropy and return it in the response. */ + /** The value used to associate a client session with an ID token and to mitigate replay attacks. + * If the caller of the API does not provide a value, Elasticsearch will generate one with sufficient entropy and return it in the response. */ nonce?: string - /** The name of the OpenID Connect realm in Elasticsearch the configuration of which should be used in order to generate the authentication request. It cannot be specified when *iss* is specified. One of *realm* or *iss* is required. */ + /** The name of the OpenID Connect realm in Elasticsearch the configuration of which should be used in order to generate the authentication request. + * It cannot be specified when *iss* is specified. + * One of *realm* or *iss* is required. */ realm?: string - /** The value used to maintain state between the authentication request and the response, typically used as a Cross-Site Request Forgery mitigation. If the caller of the API does not provide a value, Elasticsearch will generate one with sufficient entropy and return it in the response. */ + /** The value used to maintain state between the authentication request and the response, typically used as a Cross-Site Request Forgery mitigation. + * If the caller of the API does not provide a value, Elasticsearch will generate one with sufficient entropy and return it in the response. */ state?: string /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { iss?: never, login_hint?: never, nonce?: never, realm?: never, state?: never } @@ -22436,6 +32368,7 @@ export interface SecurityOidcPrepareAuthenticationRequest extends RequestBase { export interface SecurityOidcPrepareAuthenticationResponse { nonce: string realm: string + /** A URI that points to the authorization endpoint of the OpenID Connect Provider with all the parameters of the authentication request as HTTP GET parameters. 
*/ redirect: string state: string } @@ -22448,7 +32381,7 @@ export interface SecurityPutPrivilegesActions { } export interface SecurityPutPrivilegesRequest extends RequestBase { -/** If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. */ + /** If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. */ refresh?: Refresh privileges?: Record> /** All values in `body` will be added to the request body. */ @@ -22460,7 +32393,7 @@ export interface SecurityPutPrivilegesRequest extends RequestBase { export type SecurityPutPrivilegesResponse = Record> export interface SecurityPutRoleRequest extends RequestBase { -/** The name of the role that is being created or updated. On Elasticsearch Serverless, the role name must begin with a letter or digit and can only contain letters, digits and the characters '_', '-', and '.'. Each role must have a unique name, as this will serve as the identifier for that role. */ + /** The name of the role that is being created or updated. On Elasticsearch Serverless, the role name must begin with a letter or digit and can only contain letters, digits and the characters '_', '-', and '.'. Each role must have a unique name, as this will serve as the identifier for that role. */ name: Name /** If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. */ refresh?: Refresh @@ -22468,13 +32401,19 @@ export interface SecurityPutRoleRequest extends RequestBase { applications?: SecurityApplicationPrivileges[] /** A list of cluster privileges. These privileges define the cluster-level actions for users with this role. */ cluster?: SecurityClusterPrivilege[] - /** An object defining global privileges. A global privilege is a form of cluster privilege that is request-aware. Support for global privileges is currently limited to the management of application privileges. */ + /** An object defining global privileges. A global privilege is a form of cluster privilege that is request-aware. Support for global privileges is currently limited to the management of application privileges. + * @remarks This property is not supported on Elastic Cloud Serverless. */ global?: Record /** A list of indices permissions entries. */ indices?: SecurityIndicesPrivileges[] - /** A list of remote indices permissions entries. NOTE: Remote indices are effective for remote clusters configured with the API key based model. They have no effect for remote clusters configured with the certificate based model. */ + /** A list of remote indices permissions entries. + * + * NOTE: Remote indices are effective for remote clusters configured with the API key based model. + * They have no effect for remote clusters configured with the certificate based model. + * @remarks This property is not supported on Elastic Cloud Serverless. */ remote_indices?: SecurityRemoteIndicesPrivileges[] - /** A list of remote cluster permissions entries. */ + /** A list of remote cluster permissions entries. + * @remarks This property is not supported on Elastic Cloud Serverless. 
*/ remote_cluster?: SecurityRemoteClusterPrivileges[] /** Optional metadata. Within the metadata object, keys that begin with an underscore (`_`) are reserved for system use. */ metadata?: Metadata @@ -22491,23 +32430,29 @@ } export interface SecurityPutRoleResponse { + /** When an existing role is updated, `created` is set to `false`. */ role: SecurityCreatedStatus } export interface SecurityPutRoleMappingRequest extends RequestBase { -/** The distinct name that identifies the role mapping. The name is used solely as an identifier to facilitate interaction via the API; it does not affect the behavior of the mapping in any way. */ + /** The distinct name that identifies the role mapping. + * The name is used solely as an identifier to facilitate interaction via the API; it does not affect the behavior of the mapping in any way. */ name: Name /** If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. */ refresh?: Refresh /** Mappings that have `enabled` set to `false` are ignored when role mapping is performed. */ enabled?: boolean - /** Additional metadata that helps define which roles are assigned to each user. Within the metadata object, keys beginning with `_` are reserved for system usage. */ + /** Additional metadata that helps define which roles are assigned to each user. + * Within the metadata object, keys beginning with `_` are reserved for system usage. */ metadata?: Metadata - /** A list of role names that are granted to the users that match the role mapping rules. Exactly one of `roles` or `role_templates` must be specified. */ + /** A list of role names that are granted to the users that match the role mapping rules. + * Exactly one of `roles` or `role_templates` must be specified. */ roles?: string[] - /** A list of Mustache templates that will be evaluated to determine the roles names that should granted to the users that match the role mapping rules. Exactly one of `roles` or `role_templates` must be specified. */ + /** A list of Mustache templates that will be evaluated to determine the role names that should be granted to the users that match the role mapping rules. + * Exactly one of `roles` or `role_templates` must be specified. */ role_templates?: SecurityRoleTemplate[] - /** The rules that determine which users should be matched by the mapping. A rule is a logical condition that is expressed by using a JSON DSL. */ + /** The rules that determine which users should be matched by the mapping. + * A rule is a logical condition that is expressed by using a JSON DSL. */ rules?: SecurityRoleMappingRule run_as?: string[] /** All values in `body` will be added to the request body. */ @@ -22522,9 +32467,14 @@ export interface SecurityPutRoleMappingResponse { } export interface SecurityPutUserRequest extends RequestBase { -/** An identifier for the user. NOTE: Usernames must be at least 1 and no more than 507 characters. They can contain alphanumeric characters (a-z, A-Z, 0-9), spaces, punctuation, and printable symbols in the Basic Latin (ASCII) block. Leading or trailing whitespace is not allowed. */ + /** An identifier for the user. + * + * NOTE: Usernames must be at least 1 and no more than 507 characters. + * They can contain alphanumeric characters (a-z, A-Z, 0-9), spaces, punctuation, and printable symbols in the Basic Latin (ASCII) block.
+ * Leading or trailing whitespace is not allowed. */ username: Username - /** Valid values are `true`, `false`, and `wait_for`. These values have the same meaning as in the index API, but the default value for this API is true. */ + /** Valid values are `true`, `false`, and `wait_for`. + * These values have the same meaning as in the index API, but the default value for this API is true. */ refresh?: Refresh /** The email of the user. */ email?: string | null @@ -22532,11 +32482,20 @@ export interface SecurityPutUserRequest extends RequestBase { full_name?: string | null /** Arbitrary metadata that you want to associate with the user. */ metadata?: Metadata - /** The user's password. Passwords must be at least 6 characters long. When adding a user, one of `password` or `password_hash` is required. When updating an existing user, the password is optional, so that other fields on the user (such as their roles) may be updated without modifying the user's password */ + /** The user's password. + * Passwords must be at least 6 characters long. + * When adding a user, one of `password` or `password_hash` is required. + * When updating an existing user, the password is optional, so that other fields on the user (such as their roles) may be updated without modifying the user's password */ password?: Password - /** A hash of the user's password. This must be produced using the same hashing algorithm as has been configured for password storage. For more details, see the explanation of the `xpack.security.authc.password_hashing.algorithm` setting in the user cache and password hash algorithm documentation. Using this parameter allows the client to pre-hash the password for performance and/or confidentiality reasons. The `password` parameter and the `password_hash` parameter cannot be used in the same request. */ + /** A hash of the user's password. + * This must be produced using the same hashing algorithm as has been configured for password storage. + * For more details, see the explanation of the `xpack.security.authc.password_hashing.algorithm` setting in the user cache and password hash algorithm documentation. + * Using this parameter allows the client to pre-hash the password for performance and/or confidentiality reasons. + * The `password` parameter and the `password_hash` parameter cannot be used in the same request. */ password_hash?: string - /** A set of roles the user has. The roles determine the user's access permissions. To create a user without any roles, specify an empty list (`[]`). */ + /** A set of roles the user has. + * The roles determine the user's access permissions. + * To create a user without any roles, specify an empty list (`[]`). */ roles?: string[] /** Specifies whether the user is enabled. */ enabled?: boolean @@ -22547,66 +32506,131 @@ export interface SecurityPutUserRequest extends RequestBase { } export interface SecurityPutUserResponse { + /** A successful call returns a JSON structure that shows whether the user has been created or updated. + * When an existing user is updated, `created` is set to `false`. 
*/ created: boolean } export type SecurityQueryApiKeysApiKeyAggregate = AggregationsCardinalityAggregate | AggregationsValueCountAggregate | AggregationsStringTermsAggregate | AggregationsLongTermsAggregate | AggregationsDoubleTermsAggregate | AggregationsUnmappedTermsAggregate | AggregationsMultiTermsAggregate | AggregationsMissingAggregate | AggregationsFilterAggregate | AggregationsFiltersAggregate | AggregationsRangeAggregate | AggregationsDateRangeAggregate | AggregationsCompositeAggregate export interface SecurityQueryApiKeysApiKeyAggregationContainer { + /** Sub-aggregations for this aggregation. + * Only applies to bucket aggregations. */ aggregations?: Record + /** Sub-aggregations for this aggregation. + * Only applies to bucket aggregations. + * @alias aggregations */ aggs?: Record meta?: Metadata + /** A single-value metrics aggregation that calculates an approximate count of distinct values. */ cardinality?: AggregationsCardinalityAggregation + /** A multi-bucket aggregation that creates composite buckets from different sources. + * Unlike the other multi-bucket aggregations, you can use the `composite` aggregation to paginate *all* buckets from a multi-level aggregation efficiently. */ composite?: AggregationsCompositeAggregation + /** A multi-bucket value source based aggregation that enables the user to define a set of date ranges - each representing a bucket. */ date_range?: AggregationsDateRangeAggregation + /** A single bucket aggregation that narrows the set of documents to those that match a query. */ filter?: SecurityQueryApiKeysApiKeyQueryContainer + /** A multi-bucket aggregation where each bucket contains the documents that match a query. */ filters?: SecurityQueryApiKeysApiKeyFiltersAggregation missing?: AggregationsMissingAggregation + /** A multi-bucket value source based aggregation that enables the user to define a set of ranges - each representing a bucket. */ range?: AggregationsRangeAggregation + /** A multi-bucket value source based aggregation where buckets are dynamically built - one per unique value. */ terms?: AggregationsTermsAggregation + /** A single-value metrics aggregation that counts the number of values that are extracted from the aggregated documents. */ value_count?: AggregationsValueCountAggregation } export interface SecurityQueryApiKeysApiKeyFiltersAggregation extends AggregationsBucketAggregationBase { + /** Collection of queries from which to build buckets. */ filters?: AggregationsBuckets + /** Set to `true` to add a bucket to the response which will contain all documents that do not match any of the given filters. */ other_bucket?: boolean + /** The key with which the other bucket is returned. */ other_bucket_key?: string + /** By default, the named filters aggregation returns the buckets as an object. + * Set to `false` to return the buckets as an array of objects. */ keyed?: boolean } export interface SecurityQueryApiKeysApiKeyQueryContainer { + /** Matches documents matching boolean combinations of other queries. */ bool?: QueryDslBoolQuery + /** Returns documents that contain an indexed value for a field. */ exists?: QueryDslExistsQuery + /** Returns documents based on their IDs. + * This query uses document IDs stored in the `_id` field. */ ids?: QueryDslIdsQuery + /** Returns documents that match a provided text, number, date or boolean value. + * The provided text is analyzed before matching. */ match?: Partial> + /** Matches all documents, giving them all a `_score` of 1.0. 
*/ match_all?: QueryDslMatchAllQuery + /** Returns documents that contain a specific prefix in a provided field. */ prefix?: Partial> + /** Returns documents that contain terms within a provided range. */ range?: Partial> + /** Returns documents based on a provided query string, using a parser with a limited but fault-tolerant syntax. */ simple_query_string?: QueryDslSimpleQueryStringQuery + /** Returns documents that contain an exact term in a provided field. + * To return a document, the query term must exactly match the queried field's value, including whitespace and capitalization. */ term?: Partial> + /** Returns documents that contain one or more exact terms in a provided field. + * To return a document, one or more terms must exactly match a field value, including whitespace and capitalization. */ terms?: QueryDslTermsQuery + /** Returns documents that contain terms matching a wildcard pattern. */ wildcard?: Partial> } export interface SecurityQueryApiKeysRequest extends RequestBase { -/** Return the snapshot of the owner user's role descriptors associated with the API key. An API key's actual permission is the intersection of its assigned role descriptors and the owner user's role descriptors (effectively limited by it). An API key cannot retrieve any API key’s limited-by role descriptors (including itself) unless it has `manage_api_key` or higher privileges. */ + /** Return the snapshot of the owner user's role descriptors associated with the API key. + * An API key's actual permission is the intersection of its assigned role descriptors and the owner user's role descriptors (effectively limited by it). + * An API key cannot retrieve any API key’s limited-by role descriptors (including itself) unless it has `manage_api_key` or higher privileges. */ with_limited_by?: boolean - /** Determines whether to also retrieve the profile UID for the API key owner principal. If it exists, the profile UID is returned under the `profile_uid` response field for each API key. */ + /** Determines whether to also retrieve the profile UID for the API key owner principal. + * If it exists, the profile UID is returned under the `profile_uid` response field for each API key. */ with_profile_uid?: boolean /** Determines whether aggregation names are prefixed by their respective types in the response. */ typed_keys?: boolean - /** Any aggregations to run over the corpus of returned API keys. Aggregations and queries work together. Aggregations are computed only on the API keys that match the query. This supports only a subset of aggregation types, namely: `terms`, `range`, `date_range`, `missing`, `cardinality`, `value_count`, `composite`, `filter`, and `filters`. Additionally, aggregations only run over the same subset of fields that query works with. */ + /** Any aggregations to run over the corpus of returned API keys. + * Aggregations and queries work together. Aggregations are computed only on the API keys that match the query. + * This supports only a subset of aggregation types, namely: `terms`, `range`, `date_range`, `missing`, + * `cardinality`, `value_count`, `composite`, `filter`, and `filters`. + * Additionally, aggregations only run over the same subset of fields that query works with. */ aggregations?: Record - /** @alias aggregations */ - /** Any aggregations to run over the corpus of returned API keys. Aggregations and queries work together. Aggregations are computed only on the API keys that match the query. 
This supports only a subset of aggregation types, namely: `terms`, `range`, `date_range`, `missing`, `cardinality`, `value_count`, `composite`, `filter`, and `filters`. Additionally, aggregations only run over the same subset of fields that query works with. */ + /** Any aggregations to run over the corpus of returned API keys. + * Aggregations and queries work together. Aggregations are computed only on the API keys that match the query. + * This supports only a subset of aggregation types, namely: `terms`, `range`, `date_range`, `missing`, + * `cardinality`, `value_count`, `composite`, `filter`, and `filters`. + * Additionally, aggregations only run over the same subset of fields that query works with. + * @alias aggregations */ aggs?: Record - /** A query to filter which API keys to return. If the query parameter is missing, it is equivalent to a `match_all` query. The query supports a subset of query types, including `match_all`, `bool`, `term`, `terms`, `match`, `ids`, `prefix`, `wildcard`, `exists`, `range`, and `simple_query_string`. You can query the following public information associated with an API key: `id`, `type`, `name`, `creation`, `expiration`, `invalidated`, `invalidation`, `username`, `realm`, and `metadata`. NOTE: The queryable string values associated with API keys are internally mapped as keywords. Consequently, if no `analyzer` parameter is specified for a `match` query, then the provided match query string is interpreted as a single keyword value. Such a match query is hence equivalent to a `term` query. */ + /** A query to filter which API keys to return. + * If the query parameter is missing, it is equivalent to a `match_all` query. + * The query supports a subset of query types, including `match_all`, `bool`, `term`, `terms`, `match`, + * `ids`, `prefix`, `wildcard`, `exists`, `range`, and `simple_query_string`. + * You can query the following public information associated with an API key: `id`, `type`, `name`, + * `creation`, `expiration`, `invalidated`, `invalidation`, `username`, `realm`, and `metadata`. + * + * NOTE: The queryable string values associated with API keys are internally mapped as keywords. + * Consequently, if no `analyzer` parameter is specified for a `match` query, then the provided match query string is interpreted as a single keyword value. + * Such a match query is hence equivalent to a `term` query. */ query?: SecurityQueryApiKeysApiKeyQueryContainer - /** The starting document offset. It must not be negative. By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters. To page through more hits, use the `search_after` parameter. */ + /** The starting document offset. + * It must not be negative. + * By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters. + * To page through more hits, use the `search_after` parameter. */ from?: integer - /** The sort definition. Other than `id`, all public fields of an API key are eligible for sorting. In addition, sort can also be applied to the `_doc` field to sort by index order. */ + /** The sort definition. + * Other than `id`, all public fields of an API key are eligible for sorting. + * In addition, sort can also be applied to the `_doc` field to sort by index order. */ sort?: Sort - /** The number of hits to return. It must not be negative. The `size` parameter can be set to `0`, in which case no API key matches are returned, only the aggregation results. 
By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters. To page through more hits, use the `search_after` parameter. */ + /** The number of hits to return. + * It must not be negative. + * The `size` parameter can be set to `0`, in which case no API key matches are returned, only the aggregation results. + * By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters. + * To page through more hits, use the `search_after` parameter. */ size?: integer /** The search after definition. */ search_after?: SortResults @@ -22617,25 +32641,43 @@ export interface SecurityQueryApiKeysRequest extends RequestBase { } export interface SecurityQueryApiKeysResponse { + /** The total number of API keys found. */ total: integer + /** The number of API keys returned in the response. */ count: integer + /** A list of API key information. */ api_keys: SecurityApiKey[] + /** The aggregations result, if requested. */ aggregations?: Record } export interface SecurityQueryRoleQueryRole extends SecurityRoleDescriptor { _sort?: SortResults + /** Name of the role. */ name: string } export interface SecurityQueryRoleRequest extends RequestBase { -/** A query to filter which roles to return. If the query parameter is missing, it is equivalent to a `match_all` query. The query supports a subset of query types, including `match_all`, `bool`, `term`, `terms`, `match`, `ids`, `prefix`, `wildcard`, `exists`, `range`, and `simple_query_string`. You can query the following information associated with roles: `name`, `description`, `metadata`, `applications.application`, `applications.privileges`, and `applications.resources`. */ + /** A query to filter which roles to return. + * If the query parameter is missing, it is equivalent to a `match_all` query. + * The query supports a subset of query types, including `match_all`, `bool`, `term`, `terms`, `match`, + * `ids`, `prefix`, `wildcard`, `exists`, `range`, and `simple_query_string`. + * You can query the following information associated with roles: `name`, `description`, `metadata`, + * `applications.application`, `applications.privileges`, and `applications.resources`. */ query?: SecurityQueryRoleRoleQueryContainer - /** The starting document offset. It must not be negative. By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters. To page through more hits, use the `search_after` parameter. */ + /** The starting document offset. + * It must not be negative. + * By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters. + * To page through more hits, use the `search_after` parameter. */ from?: integer - /** The sort definition. You can sort on `username`, `roles`, or `enabled`. In addition, sort can also be applied to the `_doc` field to sort by index order. */ + /** The sort definition. + * You can sort on `username`, `roles`, or `enabled`. + * In addition, sort can also be applied to the `_doc` field to sort by index order. */ sort?: Sort - /** The number of hits to return. It must not be negative. By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters. To page through more hits, use the `search_after` parameter. */ + /** The number of hits to return. + * It must not be negative. + * By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters. + * To page through more hits, use the `search_after` parameter. 
*/ size?: integer /** The search after definition. */ search_after?: SortResults @@ -22646,22 +32688,45 @@ export interface SecurityQueryRoleRequest extends RequestBase { } export interface SecurityQueryRoleResponse { + /** The total number of roles found. */ total: integer + /** The number of roles returned in the response. */ count: integer + /** A list of roles that match the query. + * The returned role format is an extension of the role definition format. + * It adds the `transient_metadata.enabled` and the `_sort` fields. + * `transient_metadata.enabled` is set to `false` in case the role is automatically disabled, for example when the role grants privileges that are not allowed by the installed license. + * `_sort` is present when the search query sorts on some field. + * It contains the array of values that have been used for sorting. */ roles: SecurityQueryRoleQueryRole[] } export interface SecurityQueryRoleRoleQueryContainer { + /** matches roles matching boolean combinations of other queries. */ bool?: QueryDslBoolQuery + /** Returns roles that contain an indexed value for a field. */ exists?: QueryDslExistsQuery + /** Returns roles based on their IDs. + * This query uses role document IDs stored in the `_id` field. */ ids?: QueryDslIdsQuery + /** Returns roles that match a provided text, number, date or boolean value. + * The provided text is analyzed before matching. */ match?: Partial> + /** Matches all roles, giving them all a `_score` of 1.0. */ match_all?: QueryDslMatchAllQuery + /** Returns roles that contain a specific prefix in a provided field. */ prefix?: Partial> + /** Returns roles that contain terms within a provided range. */ range?: Partial> + /** Returns roles based on a provided query string, using a parser with a limited but fault-tolerant syntax. */ simple_query_string?: QueryDslSimpleQueryStringQuery + /** Returns roles that contain an exact term in a provided field. + * To return a document, the query term must exactly match the queried field's value, including whitespace and capitalization. */ term?: Partial> + /** Returns roles that contain one or more exact terms in a provided field. + * To return a document, one or more terms must exactly match a field value, including whitespace and capitalization. */ terms?: QueryDslTermsQuery + /** Returns roles that contain terms matching a wildcard pattern. */ wildcard?: Partial> } @@ -22670,15 +32735,27 @@ export interface SecurityQueryUserQueryUser extends SecurityUser { } export interface SecurityQueryUserRequest extends RequestBase { -/** Determines whether to retrieve the user profile UID, if it exists, for the users. */ + /** Determines whether to retrieve the user profile UID, if it exists, for the users. */ with_profile_uid?: boolean - /** A query to filter which users to return. If the query parameter is missing, it is equivalent to a `match_all` query. The query supports a subset of query types, including `match_all`, `bool`, `term`, `terms`, `match`, `ids`, `prefix`, `wildcard`, `exists`, `range`, and `simple_query_string`. You can query the following information associated with user: `username`, `roles`, `enabled`, `full_name`, and `email`. */ + /** A query to filter which users to return. + * If the query parameter is missing, it is equivalent to a `match_all` query. + * The query supports a subset of query types, including `match_all`, `bool`, `term`, `terms`, `match`, + * `ids`, `prefix`, `wildcard`, `exists`, `range`, and `simple_query_string`. 
+ * You can query the following information associated with user: `username`, `roles`, `enabled`, `full_name`, and `email`. */ query?: SecurityQueryUserUserQueryContainer - /** The starting document offset. It must not be negative. By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters. To page through more hits, use the `search_after` parameter. */ + /** The starting document offset. + * It must not be negative. + * By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters. + * To page through more hits, use the `search_after` parameter. */ from?: integer - /** The sort definition. Fields eligible for sorting are: `username`, `roles`, `enabled`. In addition, sort can also be applied to the `_doc` field to sort by index order. */ + /** The sort definition. + * Fields eligible for sorting are: `username`, `roles`, `enabled`. + * In addition, sort can also be applied to the `_doc` field to sort by index order. */ sort?: Sort - /** The number of hits to return. It must not be negative. By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters. To page through more hits, use the `search_after` parameter. */ + /** The number of hits to return. + * It must not be negative. + * By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters. + * To page through more hits, use the `search_after` parameter. */ size?: integer /** The search after definition */ search_after?: SortResults @@ -22689,27 +32766,45 @@ export interface SecurityQueryUserRequest extends RequestBase { } export interface SecurityQueryUserResponse { + /** The total number of users found. */ total: integer + /** The number of users returned in the response. */ count: integer + /** A list of users that match the query. */ users: SecurityQueryUserQueryUser[] } export interface SecurityQueryUserUserQueryContainer { + /** Returns users based on their IDs. + * This query uses the user document IDs stored in the `_id` field. */ ids?: QueryDslIdsQuery + /** matches users matching boolean combinations of other queries. */ bool?: QueryDslBoolQuery + /** Returns users that contain an indexed value for a field. */ exists?: QueryDslExistsQuery + /** Returns users that match a provided text, number, date or boolean value. + * The provided text is analyzed before matching. */ match?: Partial> + /** Matches all users, giving them all a `_score` of 1.0. */ match_all?: QueryDslMatchAllQuery + /** Returns users that contain a specific prefix in a provided field. */ prefix?: Partial> + /** Returns users that contain terms within a provided range. */ range?: Partial> + /** Returns users based on a provided query string, using a parser with a limited but fault-tolerant syntax. */ simple_query_string?: QueryDslSimpleQueryStringQuery + /** Returns users that contain an exact term in a provided field. + * To return a document, the query term must exactly match the queried field's value, including whitespace and capitalization. */ term?: Partial> + /** Returns users that contain one or more exact terms in a provided field. + * To return a document, one or more terms must exactly match a field value, including whitespace and capitalization. */ terms?: QueryDslTermsQuery + /** Returns users that contain terms matching a wildcard pattern. 
*/ wildcard?: Partial> } export interface SecuritySamlAuthenticateRequest extends RequestBase { -/** The SAML response as it was sent by the user's browser, usually a Base64 encoded XML document. */ + /** The SAML response as it was sent by the user's browser, usually a Base64 encoded XML document. */ content: string /** A JSON array with all the valid SAML Request Ids that the caller of the API has for the current user. */ ids: Ids @@ -22722,15 +32817,20 @@ export interface SecuritySamlAuthenticateRequest extends RequestBase { } export interface SecuritySamlAuthenticateResponse { + /** The access token that was generated by Elasticsearch. */ access_token: string + /** The authenticated user's name. */ username: string + /** The amount of time (in seconds) left until the token expires. */ expires_in: integer + /** The refresh token that was generated by Elasticsearch. */ refresh_token: string + /** The name of the realm where the user was authenticated. */ realm: string } export interface SecuritySamlCompleteLogoutRequest extends RequestBase { -/** The name of the SAML realm in Elasticsearch for which the configuration is used to verify the logout response. */ + /** The name of the SAML realm in Elasticsearch for which the configuration is used to verify the logout response. */ realm: string /** A JSON array with all the valid SAML Request Ids that the caller of the API has for the current user. */ ids: Ids @@ -22747,9 +32847,13 @@ export interface SecuritySamlCompleteLogoutRequest extends RequestBase { export type SecuritySamlCompleteLogoutResponse = boolean export interface SecuritySamlInvalidateRequest extends RequestBase { -/** The Assertion Consumer Service URL that matches the one of the SAML realm in Elasticsearch that should be used. You must specify either this parameter or the `realm` parameter. */ + /** The Assertion Consumer Service URL that matches the one of the SAML realm in Elasticsearch that should be used. You must specify either this parameter or the `realm` parameter. */ acs?: string - /** The query part of the URL that the user was redirected to by the SAML IdP to initiate the Single Logout. This query should include a single parameter named `SAMLRequest` that contains a SAML logout request that is deflated and Base64 encoded. If the SAML IdP has signed the logout request, the URL should include two extra parameters named `SigAlg` and `Signature` that contain the algorithm used for the signature and the signature value itself. In order for Elasticsearch to be able to verify the IdP's signature, the value of the `query_string` field must be an exact match to the string provided by the browser. The client application must not attempt to parse or process the string in any way. */ + /** The query part of the URL that the user was redirected to by the SAML IdP to initiate the Single Logout. + * This query should include a single parameter named `SAMLRequest` that contains a SAML logout request that is deflated and Base64 encoded. + * If the SAML IdP has signed the logout request, the URL should include two extra parameters named `SigAlg` and `Signature` that contain the algorithm used for the signature and the signature value itself. + * In order for Elasticsearch to be able to verify the IdP's signature, the value of the `query_string` field must be an exact match to the string provided by the browser. + * The client application must not attempt to parse or process the string in any way. 
*/ query_string: string /** The name of the SAML realm in Elasticsearch the configuration. You must specify either this parameter or the `acs` parameter. */ realm?: string @@ -22760,15 +32864,20 @@ export interface SecuritySamlInvalidateRequest extends RequestBase { } export interface SecuritySamlInvalidateResponse { + /** The number of tokens that were invalidated as part of this logout. */ invalidated: integer + /** The realm name of the SAML realm in Elasticsearch that authenticated the user. */ realm: string + /** A SAML logout response as a parameter so that the user can be redirected back to the SAML IdP. */ redirect: string } export interface SecuritySamlLogoutRequest extends RequestBase { -/** The access token that was returned as a response to calling the SAML authenticate API. Alternatively, the most recent token that was received after refreshing the original one by using a `refresh_token`. */ + /** The access token that was returned as a response to calling the SAML authenticate API. + * Alternatively, the most recent token that was received after refreshing the original one by using a `refresh_token`. */ token: string - /** The refresh token that was returned as a response to calling the SAML authenticate API. Alternatively, the most recent refresh token that was received after refreshing the original access token. */ + /** The refresh token that was returned as a response to calling the SAML authenticate API. + * Alternatively, the most recent refresh token that was received after refreshing the original access token. */ refresh_token?: string /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { token?: never, refresh_token?: never } @@ -22777,15 +32886,20 @@ export interface SecuritySamlLogoutRequest extends RequestBase { } export interface SecuritySamlLogoutResponse { + /** A URL that contains a SAML logout request as a parameter. + * You can use this URL to be redirected back to the SAML IdP and to initiate Single Logout. */ redirect: string } export interface SecuritySamlPrepareAuthenticationRequest extends RequestBase { -/** The Assertion Consumer Service URL that matches the one of the SAML realms in Elasticsearch. The realm is used to generate the authentication request. You must specify either this parameter or the `realm` parameter. */ + /** The Assertion Consumer Service URL that matches the one of the SAML realms in Elasticsearch. + * The realm is used to generate the authentication request. You must specify either this parameter or the `realm` parameter. */ acs?: string - /** The name of the SAML realm in Elasticsearch for which the configuration is used to generate the authentication request. You must specify either this parameter or the `acs` parameter. */ + /** The name of the SAML realm in Elasticsearch for which the configuration is used to generate the authentication request. + * You must specify either this parameter or the `acs` parameter. */ realm?: string - /** A string that will be included in the redirect URL that this API returns as the `RelayState` query parameter. If the Authentication Request is signed, this value is used as part of the signature computation. */ + /** A string that will be included in the redirect URL that this API returns as the `RelayState` query parameter. + * If the Authentication Request is signed, this value is used as part of the signature computation. */ relay_state?: string /** All values in `body` will be added to the request body. 
*/ body?: string | { [key: string]: any } & { acs?: never, realm?: never, relay_state?: never } @@ -22794,13 +32908,16 @@ export interface SecuritySamlPrepareAuthenticationRequest extends RequestBase { } export interface SecuritySamlPrepareAuthenticationResponse { + /** A unique identifier for the SAML Request to be stored by the caller of the API. */ id: Id + /** The name of the Elasticsearch realm that was used to construct the authentication request. */ realm: string + /** The URL to redirect the user to. */ redirect: string } export interface SecuritySamlServiceProviderMetadataRequest extends RequestBase { -/** The name of the SAML realm in Elasticsearch. */ + /** The name of the SAML realm in Elasticsearch. */ realm_name: Name /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { realm_name?: never } @@ -22809,22 +32926,34 @@ export interface SecuritySamlServiceProviderMetadataRequest extends RequestBase } export interface SecuritySamlServiceProviderMetadataResponse { + /** An XML string that contains a SAML Service Provider's metadata for the realm. */ metadata: string } export interface SecuritySuggestUserProfilesHint { + /** A list of profile UIDs to match against. */ uids?: SecurityUserProfileId[] + /** A single key-value pair to match against the labels section + * of a profile. A profile is considered matching if it matches + * at least one of the strings. */ labels?: Record } export interface SecuritySuggestUserProfilesRequest extends RequestBase { -/** A query string used to match name-related fields in user profile documents. Name-related fields are the user's `username`, `full_name`, and `email`. */ + /** A query string used to match name-related fields in user profile documents. + * Name-related fields are the user's `username`, `full_name`, and `email`. */ name?: string /** The number of profiles to return. */ size?: long - /** A comma-separated list of filters for the `data` field of the profile document. To return all content use `data=*`. To return a subset of content, use `data=` to retrieve content nested under the specified ``. By default, the API returns no `data` content. It is an error to specify `data` as both the query parameter and the request body field. */ + /** A comma-separated list of filters for the `data` field of the profile document. + * To return all content use `data=*`. + * To return a subset of content, use `data=` to retrieve content nested under the specified ``. + * By default, the API returns no `data` content. + * It is an error to specify `data` as both the query parameter and the request body field. */ data?: string | string[] - /** Extra search criteria to improve relevance of the suggestion result. Profiles matching the spcified hint are ranked higher in the response. Profiles not matching the hint aren't excluded from the response as long as the profile matches the `name` field query. */ + /** Extra search criteria to improve relevance of the suggestion result. + * Profiles matching the specified hint are ranked higher in the response. + * Profiles not matching the hint aren't excluded from the response as long as the profile matches the `name` field query. */ hint?: SecuritySuggestUserProfilesHint /** All values in `body` will be added to the request body.
*/ body?: string | { [key: string]: any } & { name?: never, size?: never, data?: never, hint?: never } @@ -22833,8 +32962,11 @@ export interface SecuritySuggestUserProfilesRequest extends RequestBase { } export interface SecuritySuggestUserProfilesResponse { + /** Metadata about the number of matching profiles. */ total: SecuritySuggestUserProfilesTotalUserProfiles + /** The number of milliseconds it took Elasticsearch to run the request. */ took: long + /** A list of profile documents, ordered by relevance, that match the search criteria. */ profiles: SecurityUserProfile[] } @@ -22844,13 +32976,24 @@ export interface SecuritySuggestUserProfilesTotalUserProfiles { } export interface SecurityUpdateApiKeyRequest extends RequestBase { -/** The ID of the API key to update. */ + /** The ID of the API key to update. */ id: Id - /** The role descriptors to assign to this API key. The API key's effective permissions are an intersection of its assigned privileges and the point in time snapshot of permissions of the owner user. You can assign new privileges by specifying them in this parameter. To remove assigned privileges, you can supply an empty `role_descriptors` parameter, that is to say, an empty object `{}`. If an API key has no assigned privileges, it inherits the owner user's full permissions. The snapshot of the owner's permissions is always updated, whether you supply the `role_descriptors` parameter or not. The structure of a role descriptor is the same as the request for the create API keys API. */ + /** The role descriptors to assign to this API key. + * The API key's effective permissions are an intersection of its assigned privileges and the point in time snapshot of permissions of the owner user. + * You can assign new privileges by specifying them in this parameter. + * To remove assigned privileges, you can supply an empty `role_descriptors` parameter, that is to say, an empty object `{}`. + * If an API key has no assigned privileges, it inherits the owner user's full permissions. + * The snapshot of the owner's permissions is always updated, whether you supply the `role_descriptors` parameter or not. + * The structure of a role descriptor is the same as the request for the create API keys API. */ role_descriptors?: Record - /** Arbitrary metadata that you want to associate with the API key. It supports a nested data structure. Within the metadata object, keys beginning with `_` are reserved for system usage. When specified, this value fully replaces the metadata previously associated with the API key. */ + /** Arbitrary metadata that you want to associate with the API key. + * It supports a nested data structure. + * Within the metadata object, keys beginning with `_` are reserved for system usage. + * When specified, this value fully replaces the metadata previously associated with the API key. */ metadata?: Metadata - /** The expiration time for the API key. By default, API keys never expire. This property can be omitted to leave the expiration unchanged. */ + /** The expiration time for the API key. + * By default, API keys never expire. + * This property can be omitted to leave the expiration unchanged. */ expiration?: Duration /** All values in `body` will be added to the request body. 
*/ body?: string | { [key: string]: any } & { id?: never, role_descriptors?: never, metadata?: never, expiration?: never } @@ -22859,17 +33002,26 @@ export interface SecurityUpdateApiKeyRequest extends RequestBase { } export interface SecurityUpdateApiKeyResponse { + /** If `true`, the API key was updated. + * If `false`, the API key didn't change because no change was detected. */ updated: boolean } export interface SecurityUpdateCrossClusterApiKeyRequest extends RequestBase { -/** The ID of the cross-cluster API key to update. */ + /** The ID of the cross-cluster API key to update. */ id: Id - /** The access to be granted to this API key. The access is composed of permissions for cross cluster search and cross cluster replication. At least one of them must be specified. When specified, the new access assignment fully replaces the previously assigned access. */ + /** The access to be granted to this API key. + * The access is composed of permissions for cross cluster search and cross cluster replication. + * At least one of them must be specified. + * When specified, the new access assignment fully replaces the previously assigned access. */ access: SecurityAccess - /** The expiration time for the API key. By default, API keys never expire. This property can be omitted to leave the value unchanged. */ + /** The expiration time for the API key. + * By default, API keys never expire. This property can be omitted to leave the value unchanged. */ expiration?: Duration - /** Arbitrary metadata that you want to associate with the API key. It supports nested data structure. Within the metadata object, keys beginning with `_` are reserved for system usage. When specified, this information fully replaces metadata previously associated with the API key. */ + /** Arbitrary metadata that you want to associate with the API key. + * It supports nested data structure. + * Within the metadata object, keys beginning with `_` are reserved for system usage. + * When specified, this information fully replaces metadata previously associated with the API key. */ metadata?: Metadata /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { id?: never, access?: never, expiration?: never, metadata?: never } @@ -22878,13 +33030,17 @@ export interface SecurityUpdateCrossClusterApiKeyRequest extends RequestBase { } export interface SecurityUpdateCrossClusterApiKeyResponse { + /** If `true`, the API key was updated. + * If `false`, the API key didn’t change because no change was detected. */ updated: boolean } export interface SecurityUpdateSettingsRequest extends RequestBase { -/** The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ + /** The period to wait for a connection to the master node. + * If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration - /** The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ + /** The period to wait for a response. + * If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration /** Settings for the index used for most security configuration, including native realm users and roles configured with the API. 
*/ security?: SecuritySecuritySettings @@ -22903,17 +33059,25 @@ export interface SecurityUpdateSettingsResponse { } export interface SecurityUpdateUserProfileDataRequest extends RequestBase { -/** A unique identifier for the user profile. */ + /** A unique identifier for the user profile. */ uid: SecurityUserProfileId /** Only perform the operation if the document has this sequence number. */ if_seq_no?: SequenceNumber /** Only perform the operation if the document has this primary term. */ if_primary_term?: long - /** If 'true', Elasticsearch refreshes the affected shards to make this operation visible to search. If 'wait_for', it waits for a refresh to make this operation visible to search. If 'false', nothing is done with refreshes. */ + /** If 'true', Elasticsearch refreshes the affected shards to make this operation + * visible to search. + * If 'wait_for', it waits for a refresh to make this operation visible to search. + * If 'false', nothing is done with refreshes. */ refresh?: Refresh - /** Searchable data that you want to associate with the user profile. This field supports a nested data structure. Within the labels object, top-level keys cannot begin with an underscore (`_`) or contain a period (`.`). */ + /** Searchable data that you want to associate with the user profile. + * This field supports a nested data structure. + * Within the labels object, top-level keys cannot begin with an underscore (`_`) or contain a period (`.`). */ labels?: Record - /** Non-searchable data that you want to associate with the user profile. This field supports a nested data structure. Within the `data` object, top-level keys cannot begin with an underscore (`_`) or contain a period (`.`). The data object is not searchable, but can be retrieved with the get user profile API. */ + /** Non-searchable data that you want to associate with the user profile. + * This field supports a nested data structure. + * Within the `data` object, top-level keys cannot begin with an underscore (`_`) or contain a period (`.`). + * The data object is not searchable, but can be retrieved with the get user profile API. */ data?: Record /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { uid?: never, if_seq_no?: never, if_primary_term?: never, refresh?: never, labels?: never, data?: never } @@ -22926,7 +33090,7 @@ export type SecurityUpdateUserProfileDataResponse = AcknowledgedResponseBase export type ShutdownType = 'restart' | 'remove' | 'replace' export interface ShutdownDeleteNodeRequest extends RequestBase { -/** The node id of node to be removed from the shutdown state */ + /** The node id of node to be removed from the shutdown state */ node_id: NodeId /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: TimeUnit @@ -22960,7 +33124,7 @@ export interface ShutdownGetNodePluginsStatus { } export interface ShutdownGetNodeRequest extends RequestBase { -/** Which node for which to retrieve the shutdown status */ + /** Which node for which to retrieve the shutdown status */ node_id?: NodeIds /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. 
*/ master_timeout?: TimeUnit @@ -22983,19 +33147,37 @@ export type ShutdownGetNodeShutdownStatus = 'not_started' | 'in_progress' | 'sta export type ShutdownGetNodeShutdownType = 'remove' | 'restart' export interface ShutdownPutNodeRequest extends RequestBase { -/** The node identifier. This parameter is not validated against the cluster's active nodes. This enables you to register a node for shut down while it is offline. No error is thrown if you specify an invalid node ID. */ + /** The node identifier. + * This parameter is not validated against the cluster's active nodes. + * This enables you to register a node for shut down while it is offline. + * No error is thrown if you specify an invalid node ID. */ node_id: NodeId - /** The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ + /** The period to wait for a connection to the master node. + * If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: TimeUnit - /** The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ + /** The period to wait for a response. + * If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: TimeUnit - /** Valid values are restart, remove, or replace. Use restart when you need to temporarily shut down a node to perform an upgrade, make configuration changes, or perform other maintenance. Because the node is expected to rejoin the cluster, data is not migrated off of the node. Use remove when you need to permanently remove a node from the cluster. The node is not marked ready for shutdown until data is migrated off of the node Use replace to do a 1:1 replacement of a node with another node. Certain allocation decisions will be ignored (such as disk watermarks) in the interest of true replacement of the source node with the target node. During a replace-type shutdown, rollover and index creation may result in unassigned shards, and shrink may fail until the replacement is complete. */ + /** Valid values are restart, remove, or replace. + * Use restart when you need to temporarily shut down a node to perform an upgrade, make configuration changes, or perform other maintenance. + * Because the node is expected to rejoin the cluster, data is not migrated off of the node. + * Use remove when you need to permanently remove a node from the cluster. + * The node is not marked ready for shutdown until data is migrated off of the node Use replace to do a 1:1 replacement of a node with another node. + * Certain allocation decisions will be ignored (such as disk watermarks) in the interest of true replacement of the source node with the target node. + * During a replace-type shutdown, rollover and index creation may result in unassigned shards, and shrink may fail until the replacement is complete. */ type: ShutdownType - /** A human-readable reason that the node is being shut down. This field provides information for other cluster operators; it does not affect the shut down process. */ + /** A human-readable reason that the node is being shut down. + * This field provides information for other cluster operators; it does not affect the shut down process. */ reason: string - /** Only valid if type is restart. Controls how long Elasticsearch will wait for the node to restart and join the cluster before reassigning its shards to other nodes. 
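/* Usage sketch for the node shutdown APIs typed above: register a restart-type shutdown, check its
 * progress, then clear it. The node id and reason are hypothetical placeholders; `client` is
 * assumed to be configured as in the earlier sketch. */
import type { Client } from '@elastic/elasticsearch'
declare const client: Client

await client.shutdown.putNode({
  node_id: 'node-1',            // not validated against active nodes, so an offline node can be registered
  type: 'restart',              // data stays on the node because it is expected to rejoin
  reason: 'OS security patch',  // informational only; does not affect the shutdown process
  allocation_delay: '20m'       // only meaningful for restart-type shutdowns
})

const status = await client.shutdown.getNode({ node_id: 'node-1' })
console.log(status.nodes[0]?.status)

// Once the node has rejoined the cluster, remove it from the shutdown state.
await client.shutdown.deleteNode({ node_id: 'node-1' })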
This works the same as delaying allocation with the index.unassigned.node_left.delayed_timeout setting. If you specify both a restart allocation delay and an index-level allocation delay, the longer of the two is used. */ + /** Only valid if type is restart. + * Controls how long Elasticsearch will wait for the node to restart and join the cluster before reassigning its shards to other nodes. + * This works the same as delaying allocation with the index.unassigned.node_left.delayed_timeout setting. + * If you specify both a restart allocation delay and an index-level allocation delay, the longer of the two is used. */ allocation_delay?: string - /** Only valid if type is replace. Specifies the name of the node that is replacing the node being shut down. Shards from the shut down node are only allowed to be allocated to the target node, and no other data will be allocated to the target node. During relocation of data certain allocation rules are ignored, such as disk watermarks or user attribute filtering rules. */ + /** Only valid if type is replace. + * Specifies the name of the node that is replacing the node being shut down. + * Shards from the shut down node are only allowed to be allocated to the target node, and no other data will be allocated to the target node. + * During relocation of data certain allocation rules are ignored, such as disk watermarks or user attribute filtering rules. */ target_node_name?: string /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { node_id?: never, master_timeout?: never, timeout?: never, type?: never, reason?: never, allocation_delay?: never, target_node_name?: never } @@ -23006,35 +33188,51 @@ export interface ShutdownPutNodeRequest extends RequestBase { export type ShutdownPutNodeResponse = AcknowledgedResponseBase export interface SimulateIngestIngestDocumentSimulationKeys { + /** Identifier for the document. */ _id: Id + /** Name of the index that the document would be indexed into if this were not a simulation. */ _index: IndexName + /** JSON body for the document. */ _source: Record + /** */ _version: SpecUtilsStringified + /** A list of the names of the pipelines executed on this document. */ executed_pipelines: string[] + /** A list of the fields that would be ignored at the indexing step. For example, a field whose + * value is larger than the allowed limit would make it through all of the pipelines, but + * would not be indexed into Elasticsearch. */ ignored_fields?: Record[] + /** Any error resulting from simulating ingest on this doc. This can be an error generated by + * executing a processor, or a mapping validation error when simulating indexing the resulting + * doc. */ error?: ErrorCause } export type SimulateIngestIngestDocumentSimulation = SimulateIngestIngestDocumentSimulationKeys & { [property: string]: string | Id | IndexName | Record | SpecUtilsStringified | string[] | Record[] | ErrorCause } export interface SimulateIngestRequest extends RequestBase { -/** The index to simulate ingesting into. This value can be overridden by specifying an index on each document. If you specify this parameter in the request path, it is used for any documents that do not explicitly specify an index argument. */ + /** The index to simulate ingesting into. + * This value can be overridden by specifying an index on each document. + * If you specify this parameter in the request path, it is used for any documents that do not explicitly specify an index argument.
*/ index?: IndexName - /** The pipeline to use as the default pipeline. This value can be used to override the default pipeline of the index. */ + /** The pipeline to use as the default pipeline. + * This value can be used to override the default pipeline of the index. */ pipeline?: PipelineName /** Sample documents to test in the pipeline. */ docs: IngestDocument[] /** A map of component template names to substitute component template definition objects. */ component_template_substitutions?: Record /** A map of index template names to substitute index template definition objects. */ - index_template_subtitutions?: Record + index_template_substitutions?: Record mapping_addition?: MappingTypeMapping - /** Pipelines to test. If you don’t specify the `pipeline` request path parameter, this parameter is required. If you specify both this and the request path parameter, the API only uses the request path parameter. */ + /** Pipelines to test. + * If you don’t specify the `pipeline` request path parameter, this parameter is required. + * If you specify both this and the request path parameter, the API only uses the request path parameter. */ pipeline_substitutions?: Record /** All values in `body` will be added to the request body. */ - body?: string | { [key: string]: any } & { index?: never, pipeline?: never, docs?: never, component_template_substitutions?: never, index_template_subtitutions?: never, mapping_addition?: never, pipeline_substitutions?: never } + body?: string | { [key: string]: any } & { index?: never, pipeline?: never, docs?: never, component_template_substitutions?: never, index_template_substitutions?: never, mapping_addition?: never, pipeline_substitutions?: never } /** All values in `querystring` will be added to the request querystring. */ - querystring?: { [key: string]: any } & { index?: never, pipeline?: never, docs?: never, component_template_substitutions?: never, index_template_subtitutions?: never, mapping_addition?: never, pipeline_substitutions?: never } + querystring?: { [key: string]: any } & { index?: never, pipeline?: never, docs?: never, component_template_substitutions?: never, index_template_substitutions?: never, mapping_addition?: never, pipeline_substitutions?: never } } export interface SimulateIngestResponse { @@ -23046,11 +33244,19 @@ export interface SimulateIngestSimulateIngestDocumentResult { } export interface SlmConfiguration { + /** If false, the snapshot fails if any data stream or index in indices is missing or closed. If true, the snapshot ignores missing or closed data streams and indices. */ ignore_unavailable?: boolean + /** A comma-separated list of data streams and indices to include in the snapshot. Multi-index syntax is supported. + * By default, a snapshot includes all data streams and indices in the cluster. If this argument is provided, the snapshot only includes the specified data streams and indices. */ indices?: Indices + /** If true, the current global state is included in the snapshot. */ include_global_state?: boolean + /** A list of feature states to be included in this snapshot. A list of features available for inclusion in the snapshot and their descriptions can be retrieved using the get features API. + * Each feature state includes one or more system indices containing data necessary for the function of that feature. Providing an empty array will include no feature states in the snapshot, regardless of the value of include_global_state.
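/* Usage sketch for the simulate ingest API typed above: run two hypothetical documents through a
 * substituted pipeline without indexing anything. The index and pipeline names are placeholders;
 * `client` is assumed to be configured as in the earlier sketch. */
import type { Client } from '@elastic/elasticsearch'
declare const client: Client

const result = await client.simulate.ingest({
  index: 'my-index',
  docs: [
    { _id: '1', _source: { message: 'hello world' } },
    { _id: '2', _source: { message: 'second doc' } }
  ],
  // Try out a modified pipeline definition without saving it first.
  pipeline_substitutions: {
    'my-pipeline': {
      processors: [{ lowercase: { field: 'message' } }]
    }
  }
})

// Each entry reports which pipelines ran and any simulated processor or mapping error.
for (const entry of result.docs) {
  console.log(entry.doc?.executed_pipelines, entry.doc?.error)
}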
By default, all available feature states will be included in the snapshot if include_global_state is true, or no feature states if include_global_state is false. */ feature_states?: string[] + /** Attaches arbitrary metadata to the snapshot, such as a record of who took the snapshot, why it was taken, or any other useful data. Metadata must be less than 1024 bytes. */ metadata?: Metadata + /** If false, the entire snapshot will fail if one or more indices included in the snapshot do not have all primary shards available. */ partial?: boolean } @@ -23075,8 +33281,11 @@ export interface SlmPolicy { } export interface SlmRetention { + /** Time period after which a snapshot is considered expired and eligible for deletion. SLM deletes expired snapshots based on the slm.retention_schedule. */ expire_after: Duration + /** Maximum number of snapshots to retain, even if the snapshots have not yet expired. If the number of snapshots in the repository exceeds this limit, the policy retains the most recent snapshots and deletes older snapshots. */ max_count: integer + /** Minimum number of snapshots to retain, even if the snapshots have expired. */ min_count: integer } @@ -23084,11 +33293,15 @@ export interface SlmSnapshotLifecycle { in_progress?: SlmInProgress last_failure?: SlmInvocation last_success?: SlmInvocation + /** The last time the policy was modified. */ modified_date?: DateTime modified_date_millis: EpochTime + /** The next time the policy will run. */ next_execution?: DateTime next_execution_millis: EpochTime policy: SlmPolicy + /** The version of the snapshot policy. + * Only the latest version is stored and incremented when the policy is updated. */ version: VersionNumber stats: SlmStatistics } @@ -23101,21 +33314,27 @@ export interface SlmStatistics { retention_timed_out?: long policy?: Id total_snapshots_deleted?: long + /** @alias total_snapshots_deleted */ snapshots_deleted?: long total_snapshot_deletion_failures?: long + /** @alias total_snapshot_deletion_failures */ snapshot_deletion_failures?: long total_snapshots_failed?: long + /** @alias total_snapshots_failed */ snapshots_failed?: long total_snapshots_taken?: long + /** @alias total_snapshots_taken */ snapshots_taken?: long } export interface SlmDeleteLifecycleRequest extends RequestBase { -/** The id of the snapshot lifecycle policy to remove */ + /** The id of the snapshot lifecycle policy to remove */ policy_id: Name - /** The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ + /** The period to wait for a connection to the master node. + * If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration - /** The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ + /** The period to wait for a response. + * If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration /** All values in `body` will be added to the request body. 
*/ body?: string | { [key: string]: any } & { policy_id?: never, master_timeout?: never, timeout?: never } @@ -23126,11 +33345,13 @@ export interface SlmDeleteLifecycleRequest extends RequestBase { export type SlmDeleteLifecycleResponse = AcknowledgedResponseBase export interface SlmExecuteLifecycleRequest extends RequestBase { -/** The id of the snapshot lifecycle policy to be executed */ + /** The id of the snapshot lifecycle policy to be executed */ policy_id: Name - /** The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ + /** The period to wait for a connection to the master node. + * If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration - /** The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ + /** The period to wait for a response. + * If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { policy_id?: never, master_timeout?: never, timeout?: never } @@ -23143,9 +33364,11 @@ export interface SlmExecuteLifecycleResponse { } export interface SlmExecuteRetentionRequest extends RequestBase { -/** The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ + /** The period to wait for a connection to the master node. + * If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration - /** The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ + /** The period to wait for a response. + * If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { master_timeout?: never, timeout?: never } @@ -23156,11 +33379,13 @@ export interface SlmExecuteRetentionRequest extends RequestBase { export type SlmExecuteRetentionResponse = AcknowledgedResponseBase export interface SlmGetLifecycleRequest extends RequestBase { -/** Comma-separated list of snapshot lifecycle policies to retrieve */ + /** Comma-separated list of snapshot lifecycle policies to retrieve */ policy_id?: Names - /** The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ + /** The period to wait for a connection to the master node. + * If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration - /** The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ + /** The period to wait for a response. + * If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration /** All values in `body` will be added to the request body. 
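/* Usage sketch for the SLM execute APIs typed above: trigger a policy immediately, then force a
 * retention run. The policy id is a hypothetical placeholder; `client` is assumed to be configured
 * as in the earlier sketch. */
import type { Client } from '@elastic/elasticsearch'
declare const client: Client

// Take a snapshot now instead of waiting for the policy's schedule.
const { snapshot_name } = await client.slm.executeLifecycle({ policy_id: 'nightly-snapshots' })
console.log(`started snapshot ${snapshot_name}`)

// Delete any snapshots that are already expired under their policies' retention rules.
await client.slm.executeRetention()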
*/ body?: string | { [key: string]: any } & { policy_id?: never, master_timeout?: never, timeout?: never } @@ -23171,7 +33396,7 @@ export interface SlmGetLifecycleRequest extends RequestBase { export type SlmGetLifecycleResponse = Record export interface SlmGetStatsRequest extends RequestBase { -/** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ + /** Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration /** Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration @@ -23195,9 +33420,13 @@ export interface SlmGetStatsResponse { } export interface SlmGetStatusRequest extends RequestBase { -/** The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. To indicate that the request should never timeout, set it to `-1`. */ + /** The period to wait for a connection to the master node. + * If no response is received before the timeout expires, the request fails and returns an error. + * To indicate that the request should never timeout, set it to `-1`. */ master_timeout?: Duration - /** The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. To indicate that the request should never timeout, set it to `-1`. */ + /** The period to wait for a response. + * If no response is received before the timeout expires, the request fails and returns an error. + * To indicate that the request should never timeout, set it to `-1`. */ timeout?: Duration /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { master_timeout?: never, timeout?: never } @@ -23210,11 +33439,15 @@ export interface SlmGetStatusResponse { } export interface SlmPutLifecycleRequest extends RequestBase { -/** The identifier for the snapshot lifecycle policy you want to create or update. */ + /** The identifier for the snapshot lifecycle policy you want to create or update. */ policy_id: Name - /** The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. To indicate that the request should never timeout, set it to `-1`. */ + /** The period to wait for a connection to the master node. + * If no response is received before the timeout expires, the request fails and returns an error. + * To indicate that the request should never timeout, set it to `-1`. */ master_timeout?: Duration - /** The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. To indicate that the request should never timeout, set it to `-1`. */ + /** The period to wait for a response. + * If no response is received before the timeout expires, the request fails and returns an error. + * To indicate that the request should never timeout, set it to `-1`. */ timeout?: Duration /** Configuration for each snapshot created by the policy. 
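/* Usage sketch for the create-or-update snapshot lifecycle policy API typed above. The policy id,
 * repository, and schedule are hypothetical placeholders, and the `schedule`, `name`, and
 * `repository` fields are assumed from the SLM API rather than shown in this hunk; `client` is
 * assumed to be configured as in the earlier sketch. */
import type { Client } from '@elastic/elasticsearch'
declare const client: Client

await client.slm.putLifecycle({
  policy_id: 'nightly-snapshots',
  schedule: '0 30 1 * * ?',         // 01:30 every night (cron syntax)
  name: '<nightly-snap-{now/d}>',   // snapshot names support date math
  repository: 'my_repository',      // must already be registered
  config: {
    indices: ['my-index-*'],        // SlmConfiguration, as documented above
    ignore_unavailable: false,
    include_global_state: false
  },
  retention: {
    expire_after: '30d',            // SlmRetention, as documented above
    min_count: 5,
    max_count: 50
  }
})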
*/ config?: SlmConfiguration @@ -23235,9 +33468,13 @@ export interface SlmPutLifecycleRequest extends RequestBase { export type SlmPutLifecycleResponse = AcknowledgedResponseBase export interface SlmStartRequest extends RequestBase { -/** The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. To indicate that the request should never timeout, set it to `-1`. */ + /** The period to wait for a connection to the master node. + * If no response is received before the timeout expires, the request fails and returns an error. + * To indicate that the request should never timeout, set it to `-1`. */ master_timeout?: Duration - /** The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. To indicate that the request should never timeout, set it to `-1`. */ + /** The period to wait for a response. + * If no response is received before the timeout expires, the request fails and returns an error. + * To indicate that the request should never timeout, set it to `-1`. */ timeout?: Duration /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { master_timeout?: never, timeout?: never } @@ -23248,9 +33485,13 @@ export interface SlmStartRequest extends RequestBase { export type SlmStartResponse = AcknowledgedResponseBase export interface SlmStopRequest extends RequestBase { -/** The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. To indicate that the request should never timeout, set it to `-1`. */ + /** The period to wait for a connection to the master node. + * If no response is received before the timeout expires, the request fails and returns an error. + * To indicate that the request should never timeout, set it to `-1`. */ master_timeout?: Duration - /** The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. To indicate that the request should never timeout, set it to `-1`. */ + /** The period to wait for a response. + * If no response is received before the timeout expires, the request fails and returns an error. + * To indicate that the request should never timeout, set it to `-1`. */ timeout?: Duration /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { master_timeout?: never, timeout?: never } @@ -23261,17 +33502,42 @@ export interface SlmStopRequest extends RequestBase { export type SlmStopResponse = AcknowledgedResponseBase export interface SnapshotAzureRepository extends SnapshotRepositoryBase { + /** The Azure repository type. */ type: 'azure' + /** The repository settings. */ settings?: SnapshotAzureRepositorySettings } export interface SnapshotAzureRepositorySettings extends SnapshotRepositorySettingsBase { + /** The path to the repository data within the container. + * It defaults to the root directory. + * + * NOTE: Don't set `base_path` when configuring a snapshot repository for Elastic Cloud Enterprise. + * Elastic Cloud Enterprise automatically generates the `base_path` for each deployment so that multiple deployments can share the same bucket. */ base_path?: string + /** The name of the Azure repository client to use. */ client?: string + /** The Azure container. 
*/ container?: string + /** The maxmimum batch size, between 1 and 256, used for `BlobBatch` requests. + * Defaults to 256 which is the maximum number supported by the Azure blob batch API. */ delete_objects_max_size?: integer + /** Either `primary_only` or `secondary_only`. + * Note that if you set it to `secondary_only`, it will force `readonly` to `true`. */ location_mode?: string + /** The maximum number of concurrent batch delete requests that will be submitted for any individual bulk delete with `BlobBatch`. + * Note that the effective number of concurrent deletes is further limited by the Azure client connection and event loop thread limits. + * Defaults to 10, minimum is 1, maximum is 100. */ max_concurrent_batch_deletes?: integer + /** If `true`, the repository is read-only. + * The cluster can retrieve and restore snapshots from the repository but not write to the repository or create snapshots in it. + * + * Only a cluster with write access can create snapshots in the repository. + * All other clusters connected to the repository should have the `readonly` parameter set to `true`. + * If `false`, the cluster can write to the repository and create snapshots in it. + * + * IMPORTANT: If you register the same snapshot repository with multiple clusters, only one cluster should have write access to the repository. + * Having multiple clusters write to the repository at the same time risks corrupting the contents of the repository. */ readonly?: boolean } @@ -23281,15 +33547,35 @@ export interface SnapshotFileCountSnapshotStats { } export interface SnapshotGcsRepository extends SnapshotRepositoryBase { + /** The Google Cloud Storage repository type. */ type: 'gcs' + /** The repository settings. */ settings: SnapshotGcsRepositorySettings } export interface SnapshotGcsRepositorySettings extends SnapshotRepositorySettingsBase { + /** The name of the bucket to be used for snapshots. */ bucket: string + /** The name used by the client when it uses the Google Cloud Storage service. */ application_name?: string + /** The path to the repository data within the bucket. + * It defaults to the root of the bucket. + * + * NOTE: Don't set `base_path` when configuring a snapshot repository for Elastic Cloud Enterprise. + * Elastic Cloud Enterprise automatically generates the `base_path` for each deployment so that multiple deployments can share the same bucket. */ base_path?: string + /** The name of the client to use to connect to Google Cloud Storage. */ client?: string + /** If `true`, the repository is read-only. + * The cluster can retrieve and restore snapshots from the repository but not write to the repository or create snapshots in it. + * + * Only a cluster with write access can create snapshots in the repository. + * All other clusters connected to the repository should have the `readonly` parameter set to `true`. + * + * If `false`, the cluster can write to the repository and create snapshots in it. + * + * IMPORTANT: If you register the same snapshot repository with multiple clusters, only one cluster should have write access to the repository. + * Having multiple clusters write to the repository at the same time risks corrupting the contents of the repository. */ readonly?: boolean } @@ -23306,14 +33592,35 @@ export interface SnapshotInfoFeatureState { } export interface SnapshotReadOnlyUrlRepository extends SnapshotRepositoryBase { + /** The read-only URL repository type. */ type: 'url' + /** The repository settings. 
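/* Usage sketch: registering a Google Cloud Storage repository with the settings typed above. The
 * repository and bucket names are hypothetical placeholders; `client` is assumed to be configured
 * as in the earlier sketch. */
import type { Client } from '@elastic/elasticsearch'
declare const client: Client

await client.snapshot.createRepository({
  name: 'my_gcs_repository',
  repository: {
    type: 'gcs',
    settings: {
      bucket: 'my-snapshot-bucket',  // required for the gcs type
      client: 'default',             // name of the GCS client defined in the keystore
      base_path: 'prod'              // omit on Elastic Cloud Enterprise, as noted above
    }
  },
  verify: true                       // check the repository on all master and data nodes
})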
*/ settings: SnapshotReadOnlyUrlRepositorySettings } export interface SnapshotReadOnlyUrlRepositorySettings extends SnapshotRepositorySettingsBase { + /** The maximum number of retries for HTTP and HTTPS URLs. */ http_max_retries?: integer + /** The maximum wait time for data transfers over a connection. */ http_socket_timeout?: Duration + /** The maximum number of snapshots the repository can contain. + * The default is `Integer.MAX_VALUE`, which is 2^31-1 or `2147483647`. */ max_number_of_snapshots?: integer + /** The URL location of the root of the shared filesystem repository. + * The following protocols are supported: + * + * * `file` + * * `ftp` + * * `http` + * * `https` + * * `jar` + * + * URLs using the HTTP, HTTPS, or FTP protocols must be explicitly allowed with the `repositories.url.allowed_urls` cluster setting. + * This setting supports wildcards in the place of a host, path, query, or fragment in the URL. + * + * URLs using the file protocol must point to the location of a shared filesystem accessible to all master and data nodes in the cluster. + * This location must be registered in the `path.repo` setting. + * You don't need to register URLs using the FTP, HTTP, HTTPS, or JAR protocols in the `path.repo` setting. */ url: string } @@ -23324,41 +33631,113 @@ export interface SnapshotRepositoryBase { } export interface SnapshotRepositorySettingsBase { + /** Big files can be broken down into multiple smaller blobs in the blob store during snapshotting. + * It is not recommended to change this value from its default unless there is an explicit reason for limiting the size of blobs in the repository. + * Setting a value lower than the default can result in an increased number of API calls to the blob store during snapshot create and restore operations compared to using the default value and thus make both operations slower and more costly. + * Specify the chunk size as a byte unit, for example: `10MB`, `5KB`, 500B. + * The default varies by repository type. */ chunk_size?: ByteSize + /** When set to `true`, metadata files are stored in compressed format. + * This setting doesn't affect index files that are already compressed by default. */ compress?: boolean + /** The maximum snapshot restore rate per node. + * It defaults to unlimited. + * Note that restores are also throttled through recovery settings. */ max_restore_bytes_per_sec?: ByteSize + /** The maximum snapshot creation rate per node. + * It defaults to 40mb per second. + * Note that if the recovery settings for managed services are set, then it defaults to unlimited, and the rate is additionally throttled through recovery settings. */ max_snapshot_bytes_per_sec?: ByteSize } export interface SnapshotS3Repository extends SnapshotRepositoryBase { + /** The S3 repository type. */ type: 's3' + /** The repository settings. + * + * NOTE: In addition to the specified settings, you can also use all non-secure client settings in the repository settings. + * In this case, the client settings found in the repository settings will be merged with those of the named client used by the repository. + * Conflicts between client and repository settings are resolved by the repository settings taking precedence over client settings. */ settings: SnapshotS3RepositorySettings } export interface SnapshotS3RepositorySettings extends SnapshotRepositorySettingsBase { + /** The name of the S3 bucket to use for snapshots. + * The bucket name must adhere to Amazon's S3 bucket naming rules. 
*/ bucket: string + /** The path to the repository data within its bucket. + * It defaults to an empty string, meaning that the repository is at the root of the bucket. + * The value of this setting should not start or end with a forward slash (`/`). + * + * NOTE: Don't set base_path when configuring a snapshot repository for Elastic Cloud Enterprise. + * Elastic Cloud Enterprise automatically generates the `base_path` for each deployment so that multiple deployments may share the same bucket. */ base_path?: string + /** The minimum threshold below which the chunk is uploaded using a single request. + * Beyond this threshold, the S3 repository will use the AWS Multipart Upload API to split the chunk into several parts, each of `buffer_size` length, and to upload each part in its own request. + * Note that setting a buffer size lower than 5mb is not allowed since it will prevent the use of the Multipart API and may result in upload errors. + * It is also not possible to set a buffer size greater than 5gb as it is the maximum upload size allowed by S3. + * Defaults to `100mb` or 5% of JVM heap, whichever is smaller. */ buffer_size?: ByteSize + /** The S3 repository supports all S3 canned ACLs: `private`, `public-read`, `public-read-write`, `authenticated-read`, `log-delivery-write`, `bucket-owner-read`, `bucket-owner-full-control`. + * You could specify a canned ACL using the `canned_acl` setting. + * When the S3 repository creates buckets and objects, it adds the canned ACL into the buckets and objects. */ canned_acl?: string + /** The name of the S3 client to use to connect to S3. */ client?: string + /** The maximum batch size, between 1 and 1000, used for `DeleteObjects` requests. + * Defaults to 1000 which is the maximum number supported by the AWS DeleteObjects API. */ delete_objects_max_size?: integer + /** The time to wait before trying again if an attempt to read a linearizable register fails. */ get_register_retry_delay?: Duration + /** The maximum number of parts that Elasticsearch will write during a multipart upload of a single object. + * Files which are larger than `buffer_size × max_multipart_parts` will be chunked into several smaller objects. + * Elasticsearch may also split a file across multiple objects to satisfy other constraints such as the `chunk_size` limit. + * Defaults to `10000` which is the maximum number of parts in a multipart upload in AWS S3. */ max_multipart_parts?: integer + /** The maximum number of possibly-dangling multipart uploads to clean up in each batch of snapshot deletions. + * Defaults to 1000 which is the maximum number supported by the AWS ListMultipartUploads API. + * If set to `0`, Elasticsearch will not attempt to clean up dangling multipart uploads. */ max_multipart_upload_cleanup_size?: integer + /** If true, the repository is read-only. + * The cluster can retrieve and restore snapshots from the repository but not write to the repository or create snapshots in it. + * + * Only a cluster with write access can create snapshots in the repository. + * All other clusters connected to the repository should have the `readonly` parameter set to `true`. + * + * If `false`, the cluster can write to the repository and create snapshots in it. + * + * IMPORTANT: If you register the same snapshot repository with multiple clusters, only one cluster should have write access to the repository. + * Having multiple clusters write to the repository at the same time risks corrupting the contents of the repository.
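/* Usage sketch: registering an S3 repository with a few of the settings typed above. The
 * repository and bucket names are hypothetical placeholders; `client` is assumed to be configured
 * as in the earlier sketch. */
import type { Client } from '@elastic/elasticsearch'
declare const client: Client

await client.snapshot.createRepository({
  name: 'my_s3_repository',
  repository: {
    type: 's3',
    settings: {
      bucket: 'my-snapshot-bucket',     // must follow Amazon's S3 bucket naming rules
      base_path: 'elasticsearch/prod',  // no leading or trailing slash, as noted above
      storage_class: 'standard_ia',
      server_side_encryption: true
    }
  }
})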
*/ readonly?: boolean + /** When set to `true`, files are encrypted on server side using an AES256 algorithm. */ server_side_encryption?: boolean + /** The S3 storage class for objects written to the repository. + * Values may be `standard`, `reduced_redundancy`, `standard_ia`, `onezone_ia`, and `intelligent_tiering`. */ storage_class?: string + /** The delay before the first retry and the amount the delay is incremented by on each subsequent retry. + * The default is 50ms and the minimum is 0ms. */ 'throttled_delete_retry.delay_increment'?: Duration + /** The upper bound on how long the delays between retries will grow to. + * The default is 5s and the minimum is 0ms. */ 'throttled_delete_retry.maximum_delay'?: Duration + /** The number times to retry a throttled snapshot deletion. + * The default is 10 and the minimum value is 0 which will disable retries altogether. + * Note that if retries are enabled in the Azure client, each of these retries comprises that many client-level retries. */ 'throttled_delete_retry.maximum_number_of_retries'?: integer } export interface SnapshotShardsStats { + /** The number of shards that initialized, started, and finalized successfully. */ done: long + /** The number of shards that failed to be included in the snapshot. */ failed: long + /** The number of shards that are finalizing but are not done. */ finalizing: long + /** The number of shards that are still initializing. */ initializing: long + /** The number of shards that have started but are not finalized. */ started: long + /** The total number of shards included in the snapshot. */ total: long } @@ -23378,13 +33757,30 @@ export interface SnapshotShardsStatsSummaryItem { } export interface SnapshotSharedFileSystemRepository extends SnapshotRepositoryBase { + /** The shared file system repository type. */ type: 'fs' + /** The repository settings. */ settings: SnapshotSharedFileSystemRepositorySettings } export interface SnapshotSharedFileSystemRepositorySettings extends SnapshotRepositorySettingsBase { + /** The location of the shared filesystem used to store and retrieve snapshots. + * This location must be registered in the `path.repo` setting on all master and data nodes in the cluster. + * Unlike `path.repo`, this setting supports only a single file path. */ location: string + /** The maximum number of snapshots the repository can contain. + * The default is `Integer.MAX_VALUE`, which is 2^31-1 or `2147483647`. */ max_number_of_snapshots?: integer + /** If `true`, the repository is read-only. + * The cluster can retrieve and restore snapshots from the repository but not write to the repository or create snapshots in it. + * + * Only a cluster with write access can create snapshots in the repository. + * All other clusters connected to the repository should have the `readonly` parameter set to `true`. + * + * If `false`, the cluster can write to the repository and create snapshots in it. + * + * IMPORTANT: If you register the same snapshot repository with multiple clusters, only one cluster should have write access to the repository. + * Having multiple clusters write to the repository at the same time risks corrupting the contents of the repository. 
*/ readonly?: boolean } @@ -23435,47 +33831,97 @@ export interface SnapshotSnapshotShardsStatus { export type SnapshotSnapshotSort = 'start_time' | 'duration' | 'name' | 'index_count' | 'repository' | 'shard_count' | 'failed_shard_count' export interface SnapshotSnapshotStats { + /** The number and size of files that still need to be copied as part of the incremental snapshot. + * For completed snapshots, this property indicates the number and size of files that were not already in the repository and were copied as part of the incremental snapshot. */ incremental: SnapshotFileCountSnapshotStats + /** The time, in milliseconds, when the snapshot creation process started. */ start_time_in_millis: EpochTime time?: Duration + /** The total time, in milliseconds, that it took for the snapshot process to complete. */ time_in_millis: DurationValue + /** The total number and size of files that are referenced by the snapshot. */ total: SnapshotFileCountSnapshotStats } export interface SnapshotSourceOnlyRepository extends SnapshotRepositoryBase { + /** The source-only repository type. */ type: 'source' + /** The repository settings. */ settings: SnapshotSourceOnlyRepositorySettings } export interface SnapshotSourceOnlyRepositorySettings extends SnapshotRepositorySettingsBase { + /** The delegated repository type. For valid values, refer to the `type` parameter. + * Source repositories can use `settings` properties for its delegated repository type. */ delegate_type?: string + /** The maximum number of snapshots the repository can contain. + * The default is `Integer.MAX_VALUE`, which is 2^31-1 or `2147483647`. */ max_number_of_snapshots?: integer + /** If `true`, the repository is read-only. + * The cluster can retrieve and restore snapshots from the repository but not write to the repository or create snapshots in it. + * + * Only a cluster with write access can create snapshots in the repository. + * All other clusters connected to the repository should have the `readonly` parameter set to `true`. + * + * If `false`, the cluster can write to the repository and create snapshots in it. + * + * IMPORTANT: If you register the same snapshot repository with multiple clusters, only one cluster should have write access to the repository. + * Having multiple clusters write to the repository at the same time risks corrupting the contents of the repository. */ read_only?: boolean + /** If `true`, the repository is read-only. + * The cluster can retrieve and restore snapshots from the repository but not write to the repository or create snapshots in it. + * + * Only a cluster with write access can create snapshots in the repository. + * All other clusters connected to the repository should have the `readonly` parameter set to `true`. + * + * If `false`, the cluster can write to the repository and create snapshots in it. + * + * IMPORTANT: If you register the same snapshot repository with multiple clusters, only one cluster should have write access to the repository. + * Having multiple clusters write to the repository at the same time risks corrupting the contents of the repository. + * @alias read_only */ readonly?: boolean } export interface SnapshotStatus { + /** Indicates whether the current cluster state is included in the snapshot. */ include_global_state: boolean indices: Record + /** The name of the repository that includes the snapshot. */ repository: string + /** Statistics for the shards in the snapshot. */ shards_stats: SnapshotShardsStats + /** The name of the snapshot. 
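/* Usage sketch: reading the per-shard progress statistics described by the SnapshotStatus and
 * SnapshotShardsStats types above via the snapshot status API (assumed to be exposed as
 * client.snapshot.status). The repository and snapshot names are hypothetical; `client` is assumed
 * to be configured as in the earlier sketch. */
import type { Client } from '@elastic/elasticsearch'
declare const client: Client

const { snapshots } = await client.snapshot.status({
  repository: 'my_repository',
  snapshot: 'nightly-snap-2025.01.01'
})
for (const snap of snapshots) {
  console.log(snap.snapshot, snap.state, snap.shards_stats.done, '/', snap.shards_stats.total)
}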
*/ snapshot: string + /** The current snapshot state: + * + * * `FAILED`: The snapshot finished with an error and failed to store any data. + * * `STARTED`: The snapshot is currently running. + * * `SUCCESS`: The snapshot completed. */ state: string + /** Details about the number (`file_count`) and size (`size_in_bytes`) of files included in the snapshot. */ stats: SnapshotSnapshotStats + /** The universally unique identifier (UUID) for the snapshot. */ uuid: Uuid } export interface SnapshotCleanupRepositoryCleanupRepositoryResults { + /** The number of binary large objects (blobs) removed from the snapshot repository during cleanup operations. + * A non-zero value indicates that unreferenced blobs were found and subsequently cleaned up. */ deleted_blobs: long + /** The number of bytes freed by cleanup operations. */ deleted_bytes: long } export interface SnapshotCleanupRepositoryRequest extends RequestBase { -/** The name of the snapshot repository to clean up. */ + /** The name of the snapshot repository to clean up. */ name: Name - /** The period to wait for a connection to the master node. If the master node is not available before the timeout expires, the request fails and returns an error. To indicate that the request should never timeout, set it to `-1` */ + /** The period to wait for a connection to the master node. + * If the master node is not available before the timeout expires, the request fails and returns an error. + * To indicate that the request should never timeout, set it to `-1` */ master_timeout?: Duration - /** The period to wait for a response from all relevant nodes in the cluster after updating the cluster metadata. If no response is received before the timeout expires, the cluster metadata update still applies but the response will indicate that it was not completely acknowledged. To indicate that the request should never timeout, set it to `-1`. */ + /** The period to wait for a response from all relevant nodes in the cluster after updating the cluster metadata. + * If no response is received before the timeout expires, the cluster metadata update still applies but the response will indicate that it was not completely acknowledged. + * To indicate that the request should never timeout, set it to `-1`. */ timeout?: Duration /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { name?: never, master_timeout?: never, timeout?: never } @@ -23484,52 +33930,82 @@ export interface SnapshotCleanupRepositoryRequest extends RequestBase { } export interface SnapshotCleanupRepositoryResponse { + /** Statistics for cleanup operations. */ results: SnapshotCleanupRepositoryCleanupRepositoryResults } export interface SnapshotCloneRequest extends RequestBase { -/** The name of the snapshot repository that both source and target snapshot belong to. */ + /** The name of the snapshot repository that both source and target snapshot belong to. */ repository: Name /** The source snapshot name. */ snapshot: Name /** The target snapshot name. */ target_snapshot: Name - /** The period to wait for the master node. If the master node is not available before the timeout expires, the request fails and returns an error. To indicate that the request should never timeout, set it to `-1`. */ + /** The period to wait for the master node. + * If the master node is not available before the timeout expires, the request fails and returns an error. + * To indicate that the request should never timeout, set it to `-1`. 
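/* Usage sketch for the clean up repository and clone snapshot APIs typed above. All names are
 * hypothetical placeholders; `client` is assumed to be configured as in the earlier sketch. */
import type { Client } from '@elastic/elasticsearch'
declare const client: Client

// Remove stale data left behind by failed or interrupted snapshot operations.
const { results } = await client.snapshot.cleanupRepository({ name: 'my_repository' })
console.log(`freed ${results.deleted_bytes} bytes across ${results.deleted_blobs} blobs`)

// Clone part of an existing snapshot into a new one inside the same repository.
await client.snapshot.clone({
  repository: 'my_repository',
  snapshot: 'nightly-snap-2025.01.01',
  target_snapshot: 'nightly-snap-2025.01.01-logs-only',
  indices: 'logs-*'   // multi-target syntax is supported
})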
*/ master_timeout?: Duration - /** The period of time to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ - timeout?: Duration - /** A comma-separated list of indices to include in the snapshot. Multi-target syntax is supported. */ + /** A comma-separated list of indices to include in the snapshot. + * Multi-target syntax is supported. */ indices: string /** All values in `body` will be added to the request body. */ - body?: string | { [key: string]: any } & { repository?: never, snapshot?: never, target_snapshot?: never, master_timeout?: never, timeout?: never, indices?: never } + body?: string | { [key: string]: any } & { repository?: never, snapshot?: never, target_snapshot?: never, master_timeout?: never, indices?: never } /** All values in `querystring` will be added to the request querystring. */ - querystring?: { [key: string]: any } & { repository?: never, snapshot?: never, target_snapshot?: never, master_timeout?: never, timeout?: never, indices?: never } + querystring?: { [key: string]: any } & { repository?: never, snapshot?: never, target_snapshot?: never, master_timeout?: never, indices?: never } } export type SnapshotCloneResponse = AcknowledgedResponseBase export interface SnapshotCreateRequest extends RequestBase { -/** The name of the repository for the snapshot. */ + /** The name of the repository for the snapshot. */ repository: Name - /** The name of the snapshot. It supportes date math. It must be unique in the repository. */ + /** The name of the snapshot. + * It supports date math. + * It must be unique in the repository. */ snapshot: Name - /** The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ + /** The period to wait for a connection to the master node. + * If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration - /** If `true`, the request returns a response when the snapshot is complete. If `false`, the request returns a response when the snapshot initializes. */ + /** If `true`, the request returns a response when the snapshot is complete. + * If `false`, the request returns a response when the snapshot initializes. */ wait_for_completion?: boolean - /** Determines how wildcard patterns in the `indices` parameter match data streams and indices. It supports comma-separated values such as `open,hidden`. */ + /** Determines how wildcard patterns in the `indices` parameter match data streams and indices. + * It supports comma-separated values such as `open,hidden`. */ expand_wildcards?: ExpandWildcards - /** The feature states to include in the snapshot. Each feature state includes one or more system indices containing related data. You can view a list of eligible features using the get features API. If `include_global_state` is `true`, all current feature states are included by default. If `include_global_state` is `false`, no feature states are included by default. Note that specifying an empty array will result in the default behavior. To exclude all feature states, regardless of the `include_global_state` value, specify an array with only the value `none` (`["none"]`). */ + /** The feature states to include in the snapshot. + * Each feature state includes one or more system indices containing related data. + * You can view a list of eligible features using the get features API.
+ * + * If `include_global_state` is `true`, all current feature states are included by default. + * If `include_global_state` is `false`, no feature states are included by default. + * + * Note that specifying an empty array will result in the default behavior. + * To exclude all feature states, regardless of the `include_global_state` value, specify an array with only the value `none` (`["none"]`). */ feature_states?: string[] - /** If `true`, the request ignores data streams and indices in `indices` that are missing or closed. If `false`, the request returns an error for any data stream or index that is missing or closed. */ + /** If `true`, the request ignores data streams and indices in `indices` that are missing or closed. + * If `false`, the request returns an error for any data stream or index that is missing or closed. */ ignore_unavailable?: boolean - /** If `true`, the current cluster state is included in the snapshot. The cluster state includes persistent cluster settings, composable index templates, legacy index templates, ingest pipelines, and ILM policies. It also includes data stored in system indices, such as Watches and task records (configurable via `feature_states`). */ + /** If `true`, the current cluster state is included in the snapshot. + * The cluster state includes persistent cluster settings, composable index templates, legacy index templates, ingest pipelines, and ILM policies. + * It also includes data stored in system indices, such as Watches and task records (configurable via `feature_states`). */ include_global_state?: boolean - /** A comma-separated list of data streams and indices to include in the snapshot. It supports a multi-target syntax. The default is an empty array (`[]`), which includes all regular data streams and regular indices. To exclude all data streams and indices, use `-*`. You can't use this parameter to include or exclude system indices or system data streams from a snapshot. Use `feature_states` instead. */ + /** A comma-separated list of data streams and indices to include in the snapshot. + * It supports a multi-target syntax. + * The default is an empty array (`[]`), which includes all regular data streams and regular indices. + * To exclude all data streams and indices, use `-*`. + * + * You can't use this parameter to include or exclude system indices or system data streams from a snapshot. + * Use `feature_states` instead. */ indices?: Indices - /** Arbitrary metadata to the snapshot, such as a record of who took the snapshot, why it was taken, or any other useful data. It can have any contents but it must be less than 1024 bytes. This information is not automatically generated by Elasticsearch. */ + /** Arbitrary metadata to the snapshot, such as a record of who took the snapshot, why it was taken, or any other useful data. + * It can have any contents but it must be less than 1024 bytes. + * This information is not automatically generated by Elasticsearch. */ metadata?: Metadata - /** If `true`, it enables you to restore a partial snapshot of indices with unavailable shards. Only shards that were successfully included in the snapshot will be restored. All missing shards will be recreated as empty. If `false`, the entire restore operation will fail if one or more indices included in the snapshot do not have all primary shards available. */ + /** If `true`, it enables you to restore a partial snapshot of indices with unavailable shards. + * Only shards that were successfully included in the snapshot will be restored. 
+ * All missing shards will be recreated as empty. + * + * If `false`, the entire restore operation will fail if one or more indices included in the snapshot do not have all primary shards available. */ partial?: boolean /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { repository?: never, snapshot?: never, master_timeout?: never, wait_for_completion?: never, expand_wildcards?: never, feature_states?: never, ignore_unavailable?: never, include_global_state?: never, indices?: never, metadata?: never, partial?: never } @@ -23538,18 +34014,26 @@ export interface SnapshotCreateRequest extends RequestBase { } export interface SnapshotCreateResponse { + /** Equals `true` if the snapshot was accepted. Present when the request had `wait_for_completion` set to `false` */ accepted?: boolean + /** Snapshot information. Present when the request had `wait_for_completion` set to `true` */ snapshot?: SnapshotSnapshotInfo } export interface SnapshotCreateRepositoryRequest extends RequestBase { -/** The name of the snapshot repository to register or update. */ + /** The name of the snapshot repository to register or update. */ name: Name - /** The period to wait for the master node. If the master node is not available before the timeout expires, the request fails and returns an error. To indicate that the request should never timeout, set it to `-1`. */ + /** The period to wait for the master node. + * If the master node is not available before the timeout expires, the request fails and returns an error. + * To indicate that the request should never timeout, set it to `-1`. */ master_timeout?: Duration - /** The period to wait for a response from all relevant nodes in the cluster after updating the cluster metadata. If no response is received before the timeout expires, the cluster metadata update still applies but the response will indicate that it was not completely acknowledged. To indicate that the request should never timeout, set it to `-1`. */ + /** The period to wait for a response from all relevant nodes in the cluster after updating the cluster metadata. + * If no response is received before the timeout expires, the cluster metadata update still applies but the response will indicate that it was not completely acknowledged. + * To indicate that the request should never timeout, set it to `-1`. */ timeout?: Duration - /** If `true`, the request verifies the repository is functional on all master and data nodes in the cluster. If `false`, this verification is skipped. You can also perform this verification with the verify snapshot repository API. */ + /** If `true`, the request verifies the repository is functional on all master and data nodes in the cluster. + * If `false`, this verification is skipped. + * You can also perform this verification with the verify snapshot repository API. */ verify?: boolean repository?: SnapshotRepository /** All values in `body` will be added to the request body. */ @@ -23561,11 +34045,14 @@ export interface SnapshotCreateRepositoryRequest extends RequestBase { export type SnapshotCreateRepositoryResponse = AcknowledgedResponseBase export interface SnapshotDeleteRequest extends RequestBase { -/** The name of the repository to delete a snapshot from. */ + /** The name of the repository to delete a snapshot from. */ repository: Name - /** A comma-separated list of snapshot names to delete. It also accepts wildcards (`*`). */ + /** A comma-separated list of snapshot names to delete. 
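/* Usage sketch for the create snapshot API typed above: take a snapshot of selected indices and
 * wait for it to complete. All names are hypothetical placeholders; `client` is assumed to be
 * configured as in the earlier sketch. */
import type { Client } from '@elastic/elasticsearch'
declare const client: Client

const response = await client.snapshot.create({
  repository: 'my_repository',
  snapshot: '<nightly-snap-{now/d}>',   // snapshot names support date math and must be unique
  wait_for_completion: true,            // return when the snapshot finishes rather than when it starts
  indices: 'my-index-*,-my-index-tmp',  // multi-target syntax; excludes are prefixed with `-`
  include_global_state: false,
  metadata: { taken_by: 'nightly job' }
})
console.log(response.snapshot?.state)

// Snapshots are removed with the delete snapshot API; wildcards are accepted.
await client.snapshot.delete({ repository: 'my_repository', snapshot: 'nightly-snap-2024*' })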
+ * It also accepts wildcards (`*`). */ snapshot: Name - /** The period to wait for the master node. If the master node is not available before the timeout expires, the request fails and returns an error. To indicate that the request should never timeout, set it to `-1`. */ + /** The period to wait for the master node. + * If the master node is not available before the timeout expires, the request fails and returns an error. + * To indicate that the request should never timeout, set it to `-1`. */ master_timeout?: Duration /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { repository?: never, snapshot?: never, master_timeout?: never } @@ -23576,11 +34063,16 @@ export interface SnapshotDeleteRequest extends RequestBase { export type SnapshotDeleteResponse = AcknowledgedResponseBase export interface SnapshotDeleteRepositoryRequest extends RequestBase { -/** The ame of the snapshot repositories to unregister. Wildcard (`*`) patterns are supported. */ + /** The name of the snapshot repositories to unregister. + * Wildcard (`*`) patterns are supported. */ name: Names - /** The period to wait for the master node. If the master node is not available before the timeout expires, the request fails and returns an error. To indicate that the request should never timeout, set it to `-1`. */ + /** The period to wait for the master node. + * If the master node is not available before the timeout expires, the request fails and returns an error. + * To indicate that the request should never timeout, set it to `-1`. */ master_timeout?: Duration - /** The period to wait for a response from all relevant nodes in the cluster after updating the cluster metadata. If no response is received before the timeout expires, the cluster metadata update still applies but the response will indicate that it was not completely acknowledged. To indicate that the request should never timeout, set it to `-1`. */ + /** The period to wait for a response from all relevant nodes in the cluster after updating the cluster metadata. + * If no response is received before the timeout expires, the cluster metadata update still applies but the response will indicate that it was not completely acknowledged. + * To indicate that the request should never timeout, set it to `-1`. */ timeout?: Duration /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { name?: never, master_timeout?: never, timeout?: never } @@ -23591,35 +34083,55 @@ export interface SnapshotDeleteRepositoryRequest extends RequestBase { export type SnapshotDeleteRepositoryResponse = AcknowledgedResponseBase export interface SnapshotGetRequest extends RequestBase { -/** A comma-separated list of snapshot repository names used to limit the request. Wildcard (`*`) expressions are supported. */ + /** A comma-separated list of snapshot repository names used to limit the request. + * Wildcard (`*`) expressions are supported. */ repository: Name - /** A comma-separated list of snapshot names to retrieve Wildcards (`*`) are supported. * To get information about all snapshots in a registered repository, use a wildcard (`*`) or `_all`. * To get information about any snapshots that are currently running, use `_current`. */ + /** A comma-separated list of snapshot names to retrieve + * Wildcards (`*`) are supported. + * + * * To get information about all snapshots in a registered repository, use a wildcard (`*`) or `_all`.
+ * * To get information about any snapshots that are currently running, use `_current`. */ snapshot: Names /** An offset identifier to start pagination from as returned by the next field in the response body. */ after?: string - /** The value of the current sort column at which to start retrieval. It can be a string `snapshot-` or a repository name when sorting by snapshot or repository name. It can be a millisecond time value or a number when sorting by `index-` or shard count. */ + /** The value of the current sort column at which to start retrieval. + * It can be a string `snapshot-` or a repository name when sorting by snapshot or repository name. + * It can be a millisecond time value or a number when sorting by `index-` or shard count. */ from_sort_value?: string /** If `false`, the request returns an error for any snapshots that are unavailable. */ ignore_unavailable?: boolean - /** If `true`, the response includes additional information about each index in the snapshot comprising the number of shards in the index, the total size of the index in bytes, and the maximum number of segments per shard in the index. The default is `false`, meaning that this information is omitted. */ + /** If `true`, the response includes additional information about each index in the snapshot comprising the number of shards in the index, the total size of the index in bytes, and the maximum number of segments per shard in the index. + * The default is `false`, meaning that this information is omitted. */ index_details?: boolean /** If `true`, the response includes the name of each index in each snapshot. */ index_names?: boolean /** If `true`, the response includes the repository name in each snapshot. */ include_repository?: boolean - /** The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ + /** The period to wait for a connection to the master node. + * If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration - /** The sort order. Valid values are `asc` for ascending and `desc` for descending order. The default behavior is ascending order. */ + /** The sort order. + * Valid values are `asc` for ascending and `desc` for descending order. + * The default behavior is ascending order. */ order?: SortOrder /** Numeric offset to start pagination from based on the snapshots matching this request. Using a non-zero value for this parameter is mutually exclusive with using the after parameter. Defaults to 0. */ offset?: integer - /** The maximum number of snapshots to return. The default is 0, which means to return all that match the request without limit. */ + /** The maximum number of snapshots to return. + * The default is 0, which means to return all that match the request without limit. */ size?: integer - /** Filter snapshots by a comma-separated list of snapshot lifecycle management (SLM) policy names that snapshots belong to. You can use wildcards (`*`) and combinations of wildcards followed by exclude patterns starting with `-`. For example, the pattern `*,-policy-a-\*` will return all snapshots except for those that were created by an SLM policy with a name starting with `policy-a-`. Note that the wildcard pattern `*` matches all snapshots created by an SLM policy but not those snapshots that were not created by an SLM policy. 
To include snapshots that were not created by an SLM policy, you can use the special pattern `_none` that will match all snapshots without an SLM policy. */ + /** Filter snapshots by a comma-separated list of snapshot lifecycle management (SLM) policy names that snapshots belong to. + * + * You can use wildcards (`*`) and combinations of wildcards followed by exclude patterns starting with `-`. + * For example, the pattern `*,-policy-a-\*` will return all snapshots except for those that were created by an SLM policy with a name starting with `policy-a-`. + * Note that the wildcard pattern `*` matches all snapshots created by an SLM policy but not those snapshots that were not created by an SLM policy. + * To include snapshots that were not created by an SLM policy, you can use the special pattern `_none` that will match all snapshots without an SLM policy. */ slm_policy_filter?: Name - /** The sort order for the result. The default behavior is sorting by snapshot start time stamp. */ + /** The sort order for the result. + * The default behavior is sorting by snapshot start time stamp. */ sort?: SnapshotSnapshotSort - /** If `true`, returns additional information about each snapshot such as the version of Elasticsearch which took the snapshot, the start and end times of the snapshot, and the number of shards snapshotted. NOTE: The parameters `size`, `order`, `after`, `from_sort_value`, `offset`, `slm_policy_filter`, and `sort` are not supported when you set `verbose=false` and the sort order for requests with `verbose=false` is undefined. */ + /** If `true`, returns additional information about each snapshot such as the version of Elasticsearch which took the snapshot, the start and end times of the snapshot, and the number of shards snapshotted. + * + * NOTE: The parameters `size`, `order`, `after`, `from_sort_value`, `offset`, `slm_policy_filter`, and `sort` are not supported when you set `verbose=false` and the sort order for requests with `verbose=false` is undefined. */ verbose?: boolean /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { repository?: never, snapshot?: never, after?: never, from_sort_value?: never, ignore_unavailable?: never, index_details?: never, index_names?: never, include_repository?: never, master_timeout?: never, order?: never, offset?: never, size?: never, slm_policy_filter?: never, sort?: never, verbose?: never } @@ -23628,8 +34140,12 @@ export interface SnapshotGetRequest extends RequestBase { } export interface SnapshotGetResponse { + /** The number of remaining snapshots that were not returned due to size limits and that can be fetched by additional requests using the `next` field value. */ remaining: integer + /** The total number of snapshots that match the request when ignoring the size limit or `after` query parameter. */ total: integer + /** If the request contained a size limit and there might be more results, a `next` field will be added to the response. + * It can be used as the `after` query parameter to fetch additional results. */ next?: string responses?: SnapshotGetSnapshotResponseItem[] snapshots?: SnapshotSnapshotInfo[] @@ -23642,11 +34158,17 @@ export interface SnapshotGetSnapshotResponseItem { } export interface SnapshotGetRepositoryRequest extends RequestBase { -/** A comma-separated list of snapshot repository names used to limit the request. Wildcard (`*`) expressions are supported including combining wildcards with exclude patterns starting with `-`. 
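// Editor's note — usage sketch, not part of this patch: the paginated snapshot listing
// described by SnapshotGetRequest above might be driven from the client roughly as follows.
// The node URL, API key, and repository name are placeholders.
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'https://localhost:9200/', auth: { apiKey: 'REDACTED' } })

async function listSnapshots (): Promise<void> {
  let after: string | undefined
  do {
    // Page through snapshots, newest first, using the `size`/`after` pagination parameters.
    const page = await client.snapshot.get({
      repository: 'my-repository',
      snapshot: '*',
      size: 50,
      sort: 'start_time',
      order: 'desc',
      after
    })
    for (const info of page.snapshots ?? []) console.log(info.snapshot, info.state)
    after = page.next // only present when more results remain
  } while (after != null)
}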
To get information about all snapshot repositories registered in the cluster, omit this parameter or use `*` or `_all`. */ + /** A comma-separated list of snapshot repository names used to limit the request. + * Wildcard (`*`) expressions are supported including combining wildcards with exclude patterns starting with `-`. + * + * To get information about all snapshot repositories registered in the cluster, omit this parameter or use `*` or `_all`. */ name?: Names - /** If `true`, the request gets information from the local node only. If `false`, the request gets information from the master node. */ + /** If `true`, the request gets information from the local node only. + * If `false`, the request gets information from the master node. */ local?: boolean - /** The period to wait for the master node. If the master node is not available before the timeout expires, the request fails and returns an error. To indicate that the request should never timeout, set it to `-1`. */ + /** The period to wait for the master node. + * If the master node is not available before the timeout expires, the request fails and returns an error. + * To indicate that the request should never timeout, set it to `-1`. */ master_timeout?: Duration /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { name?: never, local?: never, master_timeout?: never } @@ -23657,67 +34179,117 @@ export interface SnapshotGetRepositoryRequest extends RequestBase { export type SnapshotGetRepositoryResponse = Record export interface SnapshotRepositoryAnalyzeBlobDetails { + /** The name of the blob. */ name: string + /** Indicates whether the blob was overwritten while the read operations were ongoing. */ overwritten: boolean read_early: boolean + /** The position, in bytes, at which read operations completed. */ read_end: long + /** The position, in bytes, at which read operations started. */ read_start: long + /** A description of every read operation performed on the blob. */ reads: SnapshotRepositoryAnalyzeReadBlobDetails + /** The size of the blob. */ size: ByteSize + /** The size of the blob in bytes. */ size_bytes: long } export interface SnapshotRepositoryAnalyzeDetailsInfo { + /** A description of the blob that was written and read. */ blob: SnapshotRepositoryAnalyzeBlobDetails + /** The elapsed time spent overwriting the blob. + * If the blob was not overwritten, this information is omitted. */ overwrite_elapsed?: Duration + /** The elapsed time spent overwriting the blob, in nanoseconds. + * If the blob was not overwritten, this information is omitted. */ overwrite_elapsed_nanos?: DurationValue + /** The elapsed time spent writing the blob. */ write_elapsed: Duration + /** The elapsed time spent writing the blob, in nanoseconds. */ write_elapsed_nanos: DurationValue + /** The length of time spent waiting for the `max_snapshot_bytes_per_sec` (or `indices.recovery.max_bytes_per_sec` if the recovery settings for managed services are set) throttle while writing the blob. */ write_throttled: Duration + /** The length of time spent waiting for the `max_snapshot_bytes_per_sec` (or `indices.recovery.max_bytes_per_sec` if the recovery settings for managed services are set) throttle while writing the blob, in nanoseconds. */ write_throttled_nanos: DurationValue + /** The node which wrote the blob and coordinated the read operations.
*/ writer_node: SnapshotRepositoryAnalyzeSnapshotNodeInfo } export interface SnapshotRepositoryAnalyzeReadBlobDetails { + /** Indicates whether the read operation may have started before the write operation was complete. */ before_write_complete?: boolean + /** The length of time spent reading the blob. + * If the blob was not found, this detail is omitted. */ elapsed?: Duration + /** The length of time spent reading the blob, in nanoseconds. + * If the blob was not found, this detail is omitted. */ elapsed_nanos?: DurationValue + /** The length of time waiting for the first byte of the read operation to be received. + * If the blob was not found, this detail is omitted. */ first_byte_time?: Duration + /** The length of time waiting for the first byte of the read operation to be received, in nanoseconds. + * If the blob was not found, this detail is omitted. */ first_byte_time_nanos: DurationValue + /** Indicates whether the blob was found by the read operation. + * If the read was started before the write completed or the write was ended before completion, it might be false. */ found: boolean + /** The node that performed the read operation. */ node: SnapshotRepositoryAnalyzeSnapshotNodeInfo + /** The length of time spent waiting due to the `max_restore_bytes_per_sec` or `indices.recovery.max_bytes_per_sec` throttles during the read of the blob. + * If the blob was not found, this detail is omitted. */ throttled?: Duration + /** The length of time spent waiting due to the `max_restore_bytes_per_sec` or `indices.recovery.max_bytes_per_sec` throttles during the read of the blob, in nanoseconds. + * If the blob was not found, this detail is omitted. */ throttled_nanos?: DurationValue } export interface SnapshotRepositoryAnalyzeReadSummaryInfo { + /** The number of read operations performed in the test. */ count: integer + /** The maximum time spent waiting for the first byte of any read request to be received. */ max_wait: Duration + /** The maximum time spent waiting for the first byte of any read request to be received, in nanoseconds. */ max_wait_nanos: DurationValue + /** The total elapsed time spent on reading blobs in the test. */ total_elapsed: Duration + /** The total elapsed time spent on reading blobs in the test, in nanoseconds. */ total_elapsed_nanos: DurationValue + /** The total size of all the blobs or partial blobs read in the test. */ total_size: ByteSize + /** The total size of all the blobs or partial blobs read in the test, in bytes. */ total_size_bytes: long + /** The total time spent waiting due to the `max_restore_bytes_per_sec` or `indices.recovery.max_bytes_per_sec` throttles. */ total_throttled: Duration + /** The total time spent waiting due to the `max_restore_bytes_per_sec` or `indices.recovery.max_bytes_per_sec` throttles, in nanoseconds. */ total_throttled_nanos: DurationValue + /** The total time spent waiting for the first byte of each read request to be received. */ total_wait: Duration + /** The total time spent waiting for the first byte of each read request to be received, in nanoseconds. */ total_wait_nanos: DurationValue } export interface SnapshotRepositoryAnalyzeRequest extends RequestBase { -/** The name of the repository. */ + /** The name of the repository. */ name: Name - /** The total number of blobs to write to the repository during the test. For realistic experiments, you should set it to at least `2000`. */ + /** The total number of blobs to write to the repository during the test. 
+ * For realistic experiments, you should set it to at least `2000`. */ blob_count?: integer /** The number of operations to run concurrently during the test. */ concurrency?: integer - /** Indicates whether to return detailed results, including timing information for every operation performed during the analysis. If false, it returns only a summary of the analysis. */ + /** Indicates whether to return detailed results, including timing information for every operation performed during the analysis. + * If false, it returns only a summary of the analysis. */ detailed?: boolean - /** The number of nodes on which to perform an early read operation while writing each blob. Early read operations are only rarely performed. */ + /** The number of nodes on which to perform an early read operation while writing each blob. + * Early read operations are only rarely performed. */ early_read_node_count?: integer - /** The maximum size of a blob to be written during the test. For realistic experiments, you should set it to at least `2gb`. */ + /** The maximum size of a blob to be written during the test. + * For realistic experiments, you should set it to at least `2gb`. */ max_blob_size?: ByteSize - /** An upper limit on the total size of all the blobs written during the test. For realistic experiments, you should set it to at least `1tb`. */ + /** An upper limit on the total size of all the blobs written during the test. + * For realistic experiments, you should set it to at least `1tb`. */ max_total_data_size?: ByteSize /** The probability of performing a rare action such as an early read, an overwrite, or an aborted write on each blob. */ rare_action_probability?: double @@ -23725,11 +34297,15 @@ export interface SnapshotRepositoryAnalyzeRequest extends RequestBase { rarely_abort_writes?: boolean /** The number of nodes on which to read a blob after writing. */ read_node_count?: integer - /** The minimum number of linearizable register operations to perform in total. For realistic experiments, you should set it to at least `100`. */ + /** The minimum number of linearizable register operations to perform in total. + * For realistic experiments, you should set it to at least `100`. */ register_operation_count?: integer - /** The seed for the pseudo-random number generator used to generate the list of operations performed during the test. To repeat the same set of operations in multiple experiments, use the same seed in each experiment. Note that the operations are performed concurrently so might not always happen in the same order on each run. */ + /** The seed for the pseudo-random number generator used to generate the list of operations performed during the test. + * To repeat the same set of operations in multiple experiments, use the same seed in each experiment. + * Note that the operations are performed concurrently so might not always happen in the same order on each run. */ seed?: integer - /** The period of time to wait for the test to complete. If no response is received before the timeout expires, the test is cancelled and returns an error. */ + /** The period of time to wait for the test to complete. + * If no response is received before the timeout expires, the test is cancelled and returns an error. */ timeout?: Duration /** All values in `body` will be added to the request body. 
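// Editor's note — usage sketch, not part of this patch: assuming the client exposes the
// analysis API documented above as `snapshot.repositoryAnalyze`, a small trial run might
// look like this. The repository name and sizing values are illustrative; realistic runs
// should use the larger blob_count / max_blob_size values recommended in the parameter docs.
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'https://localhost:9200/' })

async function analyzeRepository (): Promise<void> {
  const analysis = await client.snapshot.repositoryAnalyze({
    name: 'my-repository',
    blob_count: 100,
    max_blob_size: '10mb',
    concurrency: 4,
    detailed: true,
    timeout: '120s'
  })
  // `issues_detected` is empty when the analysis found no correctness problems.
  console.log(analysis.summary.write.total_size, analysis.issues_detected)
}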
*/ body?: string | { [key: string]: any } & { name?: never, blob_count?: never, concurrency?: never, detailed?: never, early_read_node_count?: never, max_blob_size?: never, max_total_data_size?: never, rare_action_probability?: never, rarely_abort_writes?: never, read_node_count?: never, register_operation_count?: never, seed?: never, timeout?: never } @@ -23738,25 +34314,46 @@ export interface SnapshotRepositoryAnalyzeRequest extends RequestBase { } export interface SnapshotRepositoryAnalyzeResponse { + /** The number of blobs written to the repository during the test. */ blob_count: integer + /** The path in the repository under which all the blobs were written during the test. */ blob_path: string + /** The number of write operations performed concurrently during the test. */ concurrency: integer + /** The node that coordinated the analysis and performed the final cleanup. */ coordinating_node: SnapshotRepositoryAnalyzeSnapshotNodeInfo + /** The time it took to delete all the blobs in the container. */ delete_elapsed: Duration + /** The time it took to delete all the blobs in the container, in nanoseconds. */ delete_elapsed_nanos: DurationValue + /** A description of every read and write operation performed during the test. */ details: SnapshotRepositoryAnalyzeDetailsInfo + /** The limit on the number of nodes on which early read operations were performed after writing each blob. */ early_read_node_count: integer + /** A list of correctness issues detected, which is empty if the API succeeded. + * It is included to emphasize that a successful response does not guarantee correct behaviour in future. */ issues_detected: string[] + /** The time it took to retrieve a list of all the blobs in the container. */ listing_elapsed: Duration + /** The time it took to retrieve a list of all the blobs in the container, in nanoseconds. */ listing_elapsed_nanos: DurationValue + /** The limit on the size of a blob written during the test. */ max_blob_size: ByteSize + /** The limit, in bytes, on the size of a blob written during the test. */ max_blob_size_bytes: long + /** The limit on the total size of all blob written during the test. */ max_total_data_size: ByteSize + /** The limit, in bytes, on the total size of all blob written during the test. */ max_total_data_size_bytes: long + /** The probability of performing rare actions during the test. */ rare_action_probability: double + /** The limit on the number of nodes on which read operations were performed after writing each blob. */ read_node_count: integer + /** The name of the repository that was the subject of the analysis. */ repository: string + /** The seed for the pseudo-random number generator used to generate the operations used during the test. */ seed: long + /** A collection of statistics that summarize the results of the test. */ summary: SnapshotRepositoryAnalyzeSummaryInfo } @@ -23766,38 +34363,52 @@ export interface SnapshotRepositoryAnalyzeSnapshotNodeInfo { } export interface SnapshotRepositoryAnalyzeSummaryInfo { + /** A collection of statistics that summarise the results of the read operations in the test. */ read: SnapshotRepositoryAnalyzeReadSummaryInfo + /** A collection of statistics that summarise the results of the write operations in the test. */ write: SnapshotRepositoryAnalyzeWriteSummaryInfo } export interface SnapshotRepositoryAnalyzeWriteSummaryInfo { + /** The number of write operations performed in the test. */ count: integer + /** The total elapsed time spent on writing blobs in the test. 
*/ total_elapsed: Duration + /** The total elapsed time spent on writing blobs in the test, in nanoseconds. */ total_elapsed_nanos: DurationValue + /** The total size of all the blobs written in the test. */ total_size: ByteSize + /** The total size of all the blobs written in the test, in bytes. */ total_size_bytes: long + /** The total time spent waiting due to the `max_snapshot_bytes_per_sec` throttle. */ total_throttled: Duration + /** The total time spent waiting due to the `max_snapshot_bytes_per_sec` throttle, in nanoseconds. */ total_throttled_nanos: long } export interface SnapshotRepositoryVerifyIntegrityRequest extends RequestBase { -/** The name of the snapshot repository. */ + /** The name of the snapshot repository. */ name: Names /** If `verify_blob_contents` is `true`, this parameter specifies how many blobs to verify at once. */ blob_thread_pool_concurrency?: integer /** The maximum number of index snapshots to verify concurrently within each index verification. */ index_snapshot_verification_concurrency?: integer - /** The number of indices to verify concurrently. The default behavior is to use the entire `snapshot_meta` thread pool. */ + /** The number of indices to verify concurrently. + * The default behavior is to use the entire `snapshot_meta` thread pool. */ index_verification_concurrency?: integer /** If `verify_blob_contents` is `true`, this parameter specifies the maximum amount of data that Elasticsearch will read from the repository every second. */ max_bytes_per_sec?: string - /** The number of shard snapshot failures to track during integrity verification, in order to avoid excessive resource usage. If your repository contains more than this number of shard snapshot failures, the verification will fail. */ + /** The number of shard snapshot failures to track during integrity verification, in order to avoid excessive resource usage. + * If your repository contains more than this number of shard snapshot failures, the verification will fail. */ max_failed_shard_snapshots?: integer - /** The maximum number of snapshot metadata operations to run concurrently. The default behavior is to use at most half of the `snapshot_meta` thread pool at once. */ + /** The maximum number of snapshot metadata operations to run concurrently. + * The default behavior is to use at most half of the `snapshot_meta` thread pool at once. */ meta_thread_pool_concurrency?: integer - /** The number of snapshots to verify concurrently. The default behavior is to use at most half of the `snapshot_meta` thread pool at once. */ + /** The number of snapshots to verify concurrently. + * The default behavior is to use at most half of the `snapshot_meta` thread pool at once. */ snapshot_verification_concurrency?: integer - /** Indicates whether to verify the checksum of every data blob in the repository. If this feature is enabled, Elasticsearch will read the entire repository contents, which may be extremely slow and expensive. */ + /** Indicates whether to verify the checksum of every data blob in the repository. + * If this feature is enabled, Elasticsearch will read the entire repository contents, which may be extremely slow and expensive. */ verify_blob_contents?: boolean /** All values in `body` will be added to the request body. 
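// Editor's note — usage sketch, not part of this patch: assuming the request type above is
// exposed as `snapshot.repositoryVerifyIntegrity`, a metadata-only verification (skipping
// the very expensive blob-content check) might be invoked like this. The repository name
// is a placeholder.
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'https://localhost:9200/' })

async function verifyRepositoryIntegrity (): Promise<void> {
  const result = await client.snapshot.repositoryVerifyIntegrity({
    name: 'my-repository',
    meta_thread_pool_concurrency: 2,
    verify_blob_contents: false // reading every blob can be extremely slow, per the docs above
  })
  console.log(result) // the response is typed as `any` in these typings
}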
*/ body?: string | { [key: string]: any } & { name?: never, blob_thread_pool_concurrency?: never, index_snapshot_verification_concurrency?: never, index_verification_concurrency?: never, max_bytes_per_sec?: never, max_failed_shard_snapshots?: never, meta_thread_pool_concurrency?: never, snapshot_verification_concurrency?: never, verify_blob_contents?: never } @@ -23808,31 +34419,78 @@ export interface SnapshotRepositoryVerifyIntegrityRequest extends RequestBase { export type SnapshotRepositoryVerifyIntegrityResponse = any export interface SnapshotRestoreRequest extends RequestBase { -/** The name of the repository to restore a snapshot from. */ + /** The name of the repository to restore a snapshot from. */ repository: Name /** The name of the snapshot to restore. */ snapshot: Name - /** The period to wait for the master node. If the master node is not available before the timeout expires, the request fails and returns an error. To indicate that the request should never timeout, set it to `-1`. */ + /** The period to wait for the master node. + * If the master node is not available before the timeout expires, the request fails and returns an error. + * To indicate that the request should never timeout, set it to `-1`. */ master_timeout?: Duration - /** If `true`, the request returns a response when the restore operation completes. The operation is complete when it finishes all attempts to recover primary shards for restored indices. This applies even if one or more of the recovery attempts fail. If `false`, the request returns a response when the restore operation initializes. */ + /** If `true`, the request returns a response when the restore operation completes. + * The operation is complete when it finishes all attempts to recover primary shards for restored indices. + * This applies even if one or more of the recovery attempts fail. + * + * If `false`, the request returns a response when the restore operation initializes. */ wait_for_completion?: boolean - /** The feature states to restore. If `include_global_state` is `true`, the request restores all feature states in the snapshot by default. If `include_global_state` is `false`, the request restores no feature states by default. Note that specifying an empty array will result in the default behavior. To restore no feature states, regardless of the `include_global_state` value, specify an array containing only the value `none` (`["none"]`). */ + /** The feature states to restore. + * If `include_global_state` is `true`, the request restores all feature states in the snapshot by default. + * If `include_global_state` is `false`, the request restores no feature states by default. + * Note that specifying an empty array will result in the default behavior. + * To restore no feature states, regardless of the `include_global_state` value, specify an array containing only the value `none` (`["none"]`). */ feature_states?: string[] - /** The index settings to not restore from the snapshot. You can't use this option to ignore `index.number_of_shards`. For data streams, this option applies only to restored backing indices. New backing indices are configured using the data stream's matching index template. */ + /** The index settings to not restore from the snapshot. + * You can't use this option to ignore `index.number_of_shards`. + * + * For data streams, this option applies only to restored backing indices. + * New backing indices are configured using the data stream's matching index template. 
*/ ignore_index_settings?: string[] - /** If `true`, the request ignores any index or data stream in indices that's missing from the snapshot. If `false`, the request returns an error for any missing index or data stream. */ + /** If `true`, the request ignores any index or data stream in indices that's missing from the snapshot. + * If `false`, the request returns an error for any missing index or data stream. */ ignore_unavailable?: boolean - /** If `true`, the request restores aliases for any restored data streams and indices. If `false`, the request doesn’t restore aliases. */ + /** If `true`, the request restores aliases for any restored data streams and indices. + * If `false`, the request doesn’t restore aliases. */ include_aliases?: boolean - /** If `true`, restore the cluster state. The cluster state includes: * Persistent cluster settings * Index templates * Legacy index templates * Ingest pipelines * Index lifecycle management (ILM) policies * Stored scripts * For snapshots taken after 7.12.0, feature states If `include_global_state` is `true`, the restore operation merges the legacy index templates in your cluster with the templates contained in the snapshot, replacing any existing ones whose name matches one in the snapshot. It completely removes all persistent settings, non-legacy index templates, ingest pipelines, and ILM lifecycle policies that exist in your cluster and replaces them with the corresponding items from the snapshot. Use the `feature_states` parameter to configure how feature states are restored. If `include_global_state` is `true` and a snapshot was created without a global state then the restore request will fail. */ + /** If `true`, restore the cluster state. The cluster state includes: + * + * * Persistent cluster settings + * * Index templates + * * Legacy index templates + * * Ingest pipelines + * * Index lifecycle management (ILM) policies + * * Stored scripts + * * For snapshots taken after 7.12.0, feature states + * + * If `include_global_state` is `true`, the restore operation merges the legacy index templates in your cluster with the templates contained in the snapshot, replacing any existing ones whose name matches one in the snapshot. + * It completely removes all persistent settings, non-legacy index templates, ingest pipelines, and ILM lifecycle policies that exist in your cluster and replaces them with the corresponding items from the snapshot. + * + * Use the `feature_states` parameter to configure how feature states are restored. + * + * If `include_global_state` is `true` and a snapshot was created without a global state then the restore request will fail. */ include_global_state?: boolean - /** Index settings to add or change in restored indices, including backing indices. You can't use this option to change `index.number_of_shards`. For data streams, this option applies only to restored backing indices. New backing indices are configured using the data stream's matching index template. */ + /** Index settings to add or change in restored indices, including backing indices. + * You can't use this option to change `index.number_of_shards`. + * + * For data streams, this option applies only to restored backing indices. + * New backing indices are configured using the data stream's matching index template. */ index_settings?: IndicesIndexSettings - /** A comma-separated list of indices and data streams to restore. It supports a multi-target syntax. The default behavior is all regular indices and regular data streams in the snapshot. 
You can't use this parameter to restore system indices or system data streams. Use `feature_states` instead. */ + /** A comma-separated list of indices and data streams to restore. + * It supports a multi-target syntax. + * The default behavior is all regular indices and regular data streams in the snapshot. + * + * You can't use this parameter to restore system indices or system data streams. + * Use `feature_states` instead. */ indices?: Indices - /** If `false`, the entire restore operation will fail if one or more indices included in the snapshot do not have all primary shards available. If true, it allows restoring a partial snapshot of indices with unavailable shards. Only shards that were successfully included in the snapshot will be restored. All missing shards will be recreated as empty. */ + /** If `false`, the entire restore operation will fail if one or more indices included in the snapshot do not have all primary shards available. + * + * If true, it allows restoring a partial snapshot of indices with unavailable shards. + * Only shards that were successfully included in the snapshot will be restored. + * All missing shards will be recreated as empty. */ partial?: boolean - /** A rename pattern to apply to restored data streams and indices. Data streams and indices matching the rename pattern will be renamed according to `rename_replacement`. The rename pattern is applied as defined by the regular expression that supports referencing the original text, according to the `appendReplacement` logic. */ + /** A rename pattern to apply to restored data streams and indices. + * Data streams and indices matching the rename pattern will be renamed according to `rename_replacement`. + * + * The rename pattern is applied as defined by the regular expression that supports referencing the original text, according to the `appendReplacement` logic. */ rename_pattern?: string /** The rename replacement string that is used with the `rename_pattern`. */ rename_replacement?: string @@ -23854,13 +34512,19 @@ export interface SnapshotRestoreSnapshotRestore { } export interface SnapshotStatusRequest extends RequestBase { -/** The snapshot repository name used to limit the request. It supports wildcards (`*`) if `` isn't specified. */ + /** The snapshot repository name used to limit the request. + * It supports wildcards (`*`) if `` isn't specified. */ repository?: Name - /** A comma-separated list of snapshots to retrieve status for. The default is currently running snapshots. Wildcards (`*`) are not supported. */ + /** A comma-separated list of snapshots to retrieve status for. + * The default is currently running snapshots. + * Wildcards (`*`) are not supported. */ snapshot?: Names - /** If `false`, the request returns an error for any snapshots that are unavailable. If `true`, the request ignores snapshots that are unavailable, such as those that are corrupted or temporarily cannot be returned. */ + /** If `false`, the request returns an error for any snapshots that are unavailable. + * If `true`, the request ignores snapshots that are unavailable, such as those that are corrupted or temporarily cannot be returned. */ ignore_unavailable?: boolean - /** The period to wait for the master node. If the master node is not available before the timeout expires, the request fails and returns an error. To indicate that the request should never timeout, set it to `-1`. */ + /** The period to wait for the master node. 
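// Editor's note — usage sketch, not part of this patch: the rename-on-restore behaviour
// documented in SnapshotRestoreRequest above could be exercised roughly as follows. The
// repository, snapshot, and index pattern are placeholders.
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'https://localhost:9200/' })

async function restoreLogs (): Promise<void> {
  const response = await client.snapshot.restore({
    repository: 'my-repository',
    snapshot: 'snapshot-2025-01-01',
    indices: 'logs-*',
    rename_pattern: '(.+)',            // restore alongside the originals...
    rename_replacement: 'restored-$1', // ...under a "restored-" prefix
    include_global_state: false,
    wait_for_completion: true
  })
  console.log(response)
}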
+ * If the master node is not available before the timeout expires, the request fails and returns an error. + * To indicate that the request should never timeout, set it to `-1`. */ master_timeout?: Duration /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { repository?: never, snapshot?: never, ignore_unavailable?: never, master_timeout?: never } @@ -23873,15 +34537,22 @@ export interface SnapshotStatusResponse { } export interface SnapshotVerifyRepositoryCompactNodeInfo { + /** A human-readable name for the node. + * You can set this name using the `node.name` property in `elasticsearch.yml`. + * The default value is the machine's hostname. */ name: Name } export interface SnapshotVerifyRepositoryRequest extends RequestBase { -/** The name of the snapshot repository to verify. */ + /** The name of the snapshot repository to verify. */ name: Name - /** The period to wait for the master node. If the master node is not available before the timeout expires, the request fails and returns an error. To indicate that the request should never timeout, set it to `-1`. */ + /** The period to wait for the master node. + * If the master node is not available before the timeout expires, the request fails and returns an error. + * To indicate that the request should never timeout, set it to `-1`. */ master_timeout?: Duration - /** The period to wait for a response from all relevant nodes in the cluster after updating the cluster metadata. If no response is received before the timeout expires, the cluster metadata update still applies but the response will indicate that it was not completely acknowledged. To indicate that the request should never timeout, set it to `-1`. */ + /** The period to wait for a response from all relevant nodes in the cluster after updating the cluster metadata. + * If no response is received before the timeout expires, the cluster metadata update still applies but the response will indicate that it was not completely acknowledged. + * To indicate that the request should never timeout, set it to `-1`. */ timeout?: Duration /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { name?: never, master_timeout?: never, timeout?: never } @@ -23890,6 +34561,8 @@ export interface SnapshotVerifyRepositoryRequest extends RequestBase { } export interface SnapshotVerifyRepositoryResponse { + /** Information about the nodes connected to the snapshot repository. + * The key is the ID of the node. */ nodes: Record } @@ -23901,7 +34574,7 @@ export interface SqlColumn { export type SqlRow = any[] export interface SqlClearCursorRequest extends RequestBase { -/** Cursor to clear. */ + /** Cursor to clear. */ cursor: string /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { cursor?: never } @@ -23914,7 +34587,7 @@ export interface SqlClearCursorResponse { } export interface SqlDeleteAsyncRequest extends RequestBase { -/** The identifier for the search. */ + /** The identifier for the search. */ id: Id /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { id?: never } @@ -23925,15 +34598,20 @@ export interface SqlDeleteAsyncRequest extends RequestBase { export type SqlDeleteAsyncResponse = AcknowledgedResponseBase export interface SqlGetAsyncRequest extends RequestBase { -/** The identifier for the search. */ + /** The identifier for the search. 
*/ id: Id - /** The separator for CSV results. The API supports this parameter only for CSV responses. */ + /** The separator for CSV results. + * The API supports this parameter only for CSV responses. */ delimiter?: string - /** The format for the response. You must specify a format using this parameter or the `Accept` HTTP header. If you specify both, the API uses this parameter. */ + /** The format for the response. + * You must specify a format using this parameter or the `Accept` HTTP header. + * If you specify both, the API uses this parameter. */ format?: string - /** The retention period for the search and its results. It defaults to the `keep_alive` period for the original SQL search. */ + /** The retention period for the search and its results. + * It defaults to the `keep_alive` period for the original SQL search. */ keep_alive?: Duration - /** The period to wait for complete results. It defaults to no timeout, meaning the request waits for complete search results. */ + /** The period to wait for complete results. + * It defaults to no timeout, meaning the request waits for complete search results. */ wait_for_completion_timeout?: Duration /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { id?: never, delimiter?: never, format?: never, keep_alive?: never, wait_for_completion_timeout?: never } @@ -23942,16 +34620,32 @@ export interface SqlGetAsyncRequest extends RequestBase { } export interface SqlGetAsyncResponse { + /** Identifier for the search. + * This value is returned only for async and saved synchronous searches. + * For CSV, TSV, and TXT responses, this value is returned in the `Async-ID` HTTP header. */ id: Id + /** If `true`, the search is still running. + * If `false`, the search has finished. + * This value is returned only for async and saved synchronous searches. + * For CSV, TSV, and TXT responses, this value is returned in the `Async-partial` HTTP header. */ is_running: boolean + /** If `true`, the response does not contain complete search results. + * If `is_partial` is `true` and `is_running` is `true`, the search is still running. + * If `is_partial` is `true` but `is_running` is `false`, the results are partial due to a failure or timeout. + * This value is returned only for async and saved synchronous searches. + * For CSV, TSV, and TXT responses, this value is returned in the `Async-partial` HTTP header. */ is_partial: boolean + /** Column headings for the search results. Each object is a column. */ columns?: SqlColumn[] + /** The cursor for the next set of paginated results. + * For CSV, TSV, and TXT responses, this value is returned in the `Cursor` HTTP header. */ cursor?: string + /** The values for the search results. */ rows: SqlRow[] } export interface SqlGetAsyncStatusRequest extends RequestBase { -/** The identifier for the search. */ + /** The identifier for the search. */ id: Id /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { id?: never } @@ -23960,28 +34654,47 @@ export interface SqlGetAsyncStatusRequest extends RequestBase { } export interface SqlGetAsyncStatusResponse { + /** The timestamp, in milliseconds since the Unix epoch, when Elasticsearch will delete the search and its results, even if the search is still running. */ expiration_time_in_millis: EpochTime + /** The identifier for the search. */ id: string + /** If `true`, the search is still running. + * If `false`, the search has finished. 
*/ is_running: boolean + /** If `true`, the response does not contain complete search results. + * If `is_partial` is `true` and `is_running` is `true`, the search is still running. + * If `is_partial` is `true` but `is_running` is `false`, the results are partial due to a failure or timeout. */ is_partial: boolean + /** The timestamp, in milliseconds since the Unix epoch, when the search started. + * The API returns this property only for running searches. */ start_time_in_millis: EpochTime + /** The HTTP status code for the search. + * The API returns this property only for completed searches. */ completion_status?: uint } export interface SqlQueryRequest extends RequestBase { -/** The format for the response. You can also specify a format using the `Accept` HTTP header. If you specify both this parameter and the `Accept` HTTP header, this parameter takes precedence. */ + /** The format for the response. + * You can also specify a format using the `Accept` HTTP header. + * If you specify both this parameter and the `Accept` HTTP header, this parameter takes precedence. */ format?: SqlQuerySqlFormat - /** If `true`, the response has partial results when there are shard request timeouts or shard failures. If `false`, the API returns an error with no partial results. */ + /** If `true`, the response has partial results when there are shard request timeouts or shard failures. + * If `false`, the API returns an error with no partial results. */ allow_partial_search_results?: boolean - /** The default catalog (cluster) for queries. If unspecified, the queries execute on the data in the local cluster only. */ + /** The default catalog (cluster) for queries. + * If unspecified, the queries execute on the data in the local cluster only. */ catalog?: string - /** If `true`, the results are in a columnar fashion: one row represents all the values of a certain column from the current page of results. The API supports this parameter only for CBOR, JSON, SMILE, and YAML responses. */ + /** If `true`, the results are in a columnar fashion: one row represents all the values of a certain column from the current page of results. + * The API supports this parameter only for CBOR, JSON, SMILE, and YAML responses. */ columnar?: boolean - /** The cursor used to retrieve a set of paginated results. If you specify a cursor, the API only uses the `columnar` and `time_zone` request body parameters. It ignores other request body parameters. */ + /** The cursor used to retrieve a set of paginated results. + * If you specify a cursor, the API only uses the `columnar` and `time_zone` request body parameters. + * It ignores other request body parameters. */ cursor?: string /** The maximum number of rows (or entries) to return in one response. */ fetch_size?: integer - /** If `false`, the API returns an exception when encountering multiple values for a field. If `true`, the API is lenient and returns the first value from the array with no guarantee of consistent results. */ + /** If `false`, the API returns an exception when encountering multiple values for a field. + * If `true`, the API is lenient and returns the first value from the array with no guarantee of consistent results. */ field_multi_value_leniency?: boolean /** The Elasticsearch query DSL for additional filtering. */ filter?: QueryDslQueryContainer @@ -23989,9 +34702,12 @@ export interface SqlQueryRequest extends RequestBase { index_using_frozen?: boolean /** The retention period for an async or saved synchronous search. 
*/ keep_alive?: Duration - /** If `true`, Elasticsearch stores synchronous searches if you also specify the `wait_for_completion_timeout` parameter. If `false`, Elasticsearch only stores async searches that don't finish before the `wait_for_completion_timeout`. */ + /** If `true`, Elasticsearch stores synchronous searches if you also specify the `wait_for_completion_timeout` parameter. + * If `false`, Elasticsearch only stores async searches that don't finish before the `wait_for_completion_timeout`. */ keep_on_completion?: boolean - /** The minimum retention period for the scroll cursor. After this time period, a pagination request might fail because the scroll cursor is no longer available. Subsequent scroll requests prolong the lifetime of the scroll cursor by the duration of `page_timeout` in the scroll request. */ + /** The minimum retention period for the scroll cursor. + * After this time period, a pagination request might fail because the scroll cursor is no longer available. + * Subsequent scroll requests prolong the lifetime of the scroll cursor by the duration of `page_timeout` in the scroll request. */ page_timeout?: Duration /** The values for parameters in the query. */ params?: Record @@ -23999,11 +34715,16 @@ export interface SqlQueryRequest extends RequestBase { query?: string /** The timeout before the request fails. */ request_timeout?: Duration - /** One or more runtime fields for the search request. These fields take precedence over mapped fields with the same name. */ + /** One or more runtime fields for the search request. + * These fields take precedence over mapped fields with the same name. */ runtime_mappings?: MappingRuntimeFields /** The ISO-8601 time zone ID for the search. */ time_zone?: TimeZone - /** The period to wait for complete results. It defaults to no timeout, meaning the request waits for complete search results. If the search doesn't finish within this period, the search becomes async. To save a synchronous search, you must specify this parameter and the `keep_on_completion` parameter. */ + /** The period to wait for complete results. + * It defaults to no timeout, meaning the request waits for complete search results. + * If the search doesn't finish within this period, the search becomes async. + * + * To save a synchronous search, you must specify this parameter and the `keep_on_completion` parameter. */ wait_for_completion_timeout?: Duration /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { format?: never, allow_partial_search_results?: never, catalog?: never, columnar?: never, cursor?: never, fetch_size?: never, field_multi_value_leniency?: never, filter?: never, index_using_frozen?: never, keep_alive?: never, keep_on_completion?: never, page_timeout?: never, params?: never, query?: never, request_timeout?: never, runtime_mappings?: never, time_zone?: never, wait_for_completion_timeout?: never } @@ -24012,18 +34733,34 @@ export interface SqlQueryRequest extends RequestBase { } export interface SqlQueryResponse { + /** Column headings for the search results. Each object is a column. */ columns?: SqlColumn[] + /** The cursor for the next set of paginated results. + * For CSV, TSV, and TXT responses, this value is returned in the `Cursor` HTTP header. */ cursor?: string + /** The identifier for the search. + * This value is returned only for async and saved synchronous searches. + * For CSV, TSV, and TXT responses, this value is returned in the `Async-ID` HTTP header. 
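// Editor's note — usage sketch, not part of this patch: the async SQL flow described by
// SqlQueryRequest and SqlGetAsyncRequest above might be wired up like this. The SQL
// statement and index pattern are placeholders.
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'https://localhost:9200/' })

async function runSqlSearch (): Promise<void> {
  // Becomes an async search if it does not complete within two seconds.
  const first = await client.sql.query({
    query: 'SELECT host, AVG(latency) FROM "logs-*" GROUP BY host',
    fetch_size: 100,
    wait_for_completion_timeout: '2s',
    keep_on_completion: true
  })

  if (first.is_running === true && first.id != null) {
    // Poll the stored search by its identifier until it finishes.
    const finished = await client.sql.getAsync({ id: first.id, wait_for_completion_timeout: '30s' })
    console.log(finished.rows)
  } else {
    console.log(first.rows)
  }
}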
*/ id?: Id + /** If `true`, the search is still running. + * If `false`, the search has finished. + * This value is returned only for async and saved synchronous searches. + * For CSV, TSV, and TXT responses, this value is returned in the `Async-partial` HTTP header. */ is_running?: boolean + /** If `true`, the response does not contain complete search results. + * If `is_partial` is `true` and `is_running` is `true`, the search is still running. + * If `is_partial` is `true` but `is_running` is `false`, the results are partial due to a failure or timeout. + * This value is returned only for async and saved synchronous searches. + * For CSV, TSV, and TXT responses, this value is returned in the `Async-partial` HTTP header. */ is_partial?: boolean + /** The values for the search results. */ rows: SqlRow[] } export type SqlQuerySqlFormat = 'csv' | 'json' | 'tsv' | 'txt' | 'yaml' | 'cbor' | 'smile' export interface SqlTranslateRequest extends RequestBase { -/** The maximum number of rows (or entries) to return in one response. */ + /** The maximum number of rows (or entries) to return in one response. */ fetch_size?: integer /** The Elasticsearch query DSL for additional filtering. */ filter?: QueryDslQueryContainer @@ -24047,13 +34784,23 @@ export interface SqlTranslateResponse { } export interface SslCertificatesCertificateInformation { + /** If the path refers to a container file (a jks keystore, or a PKCS#12 file), it is the alias of the certificate. + * Otherwise, it is null. */ alias: string | null + /** The ISO formatted date of the certificate's expiry (not-after) date. */ expiry: DateTime + /** The format of the file. + * Valid values include `jks`, `PKCS12`, and `PEM`. */ format: string + /** Indicates whether Elasticsearch has access to the private key for this certificate. */ has_private_key: boolean + /** The Distinguished Name of the certificate's issuer. */ issuer?: string + /** The path to the certificate, as configured in the `elasticsearch.yml` file. */ path: string + /** The hexadecimal representation of the certificate's serial number. */ serial_number: string + /** The Distinguished Name of the certificate's subject. */ subject_dn: string } @@ -24067,24 +34814,32 @@ export interface SslCertificatesRequest extends RequestBase { export type SslCertificatesResponse = SslCertificatesCertificateInformation[] export interface SynonymsSynonymRule { + /** The identifier for the synonym rule. + * If you do not specify a synonym rule ID when you create a rule, an identifier is created automatically by Elasticsearch. */ id?: Id + /** The synonyms that conform the synonym rule in Solr format. */ synonyms: SynonymsSynonymString } export interface SynonymsSynonymRuleRead { + /** Synonym Rule identifier */ id: Id + /** Synonyms, in Solr format, that conform the synonym rule. */ synonyms: SynonymsSynonymString } export type SynonymsSynonymString = string export interface SynonymsSynonymsUpdateResult { + /** The update operation result. */ result: Result + /** Updating synonyms in a synonym set reloads the associated analyzers. + * This information is the analyzers reloading result. */ reload_analyzers_details: IndicesReloadSearchAnalyzersReloadResult } export interface SynonymsDeleteSynonymRequest extends RequestBase { -/** The synonyms set identifier to delete. */ + /** The synonyms set identifier to delete. */ id: Id /** All values in `body` will be added to the request body. 
*/ body?: string | { [key: string]: any } & { id?: never } @@ -24095,7 +34850,7 @@ export interface SynonymsDeleteSynonymRequest extends RequestBase { export type SynonymsDeleteSynonymResponse = AcknowledgedResponseBase export interface SynonymsDeleteSynonymRuleRequest extends RequestBase { -/** The ID of the synonym set to update. */ + /** The ID of the synonym set to update. */ set_id: Id /** The ID of the synonym rule to delete. */ rule_id: Id @@ -24108,7 +34863,7 @@ export interface SynonymsDeleteSynonymRuleRequest extends RequestBase { export type SynonymsDeleteSynonymRuleResponse = SynonymsSynonymsUpdateResult export interface SynonymsGetSynonymRequest extends RequestBase { -/** The synonyms set identifier to retrieve. */ + /** The synonyms set identifier to retrieve. */ id: Id /** The starting offset for query rules to retrieve. */ from?: integer @@ -24121,12 +34876,14 @@ export interface SynonymsGetSynonymRequest extends RequestBase { } export interface SynonymsGetSynonymResponse { + /** The total number of synonyms rules that the synonyms set contains. */ count: integer + /** Synonym rule details. */ synonyms_set: SynonymsSynonymRuleRead[] } export interface SynonymsGetSynonymRuleRequest extends RequestBase { -/** The ID of the synonym set to retrieve the synonym rule from. */ + /** The ID of the synonym set to retrieve the synonym rule from. */ set_id: Id /** The ID of the synonym rule to retrieve. */ rule_id: Id @@ -24139,7 +34896,7 @@ export interface SynonymsGetSynonymRuleRequest extends RequestBase { export type SynonymsGetSynonymRuleResponse = SynonymsSynonymRuleRead export interface SynonymsGetSynonymsSetsRequest extends RequestBase { -/** The starting offset for synonyms sets to retrieve. */ + /** The starting offset for synonyms sets to retrieve. */ from?: integer /** The maximum number of synonyms sets to retrieve. */ size?: integer @@ -24150,17 +34907,21 @@ export interface SynonymsGetSynonymsSetsRequest extends RequestBase { } export interface SynonymsGetSynonymsSetsResponse { + /** The total number of synonyms sets defined. */ count: integer + /** The identifier and total number of defined synonym rules for each synonyms set. */ results: SynonymsGetSynonymsSetsSynonymsSetItem[] } export interface SynonymsGetSynonymsSetsSynonymsSetItem { + /** Synonyms set identifier */ synonyms_set: Id + /** Number of synonym rules that the synonym set contains */ count: integer } export interface SynonymsPutSynonymRequest extends RequestBase { -/** The ID of the synonyms set to be created or updated. */ + /** The ID of the synonyms set to be created or updated. */ id: Id /** The synonym rules definitions for the synonyms set. */ synonyms_set: SynonymsSynonymRule | SynonymsSynonymRule[] @@ -24176,7 +34937,7 @@ export interface SynonymsPutSynonymResponse { } export interface SynonymsPutSynonymRuleRequest extends RequestBase { -/** The ID of the synonym set. */ + /** The ID of the synonym set. */ set_id: Id /** The ID of the synonym rule to be updated or created. */ rule_id: Id @@ -24210,6 +34971,10 @@ export interface TasksTaskInfo { action: string cancelled?: boolean cancellable: boolean + /** Human readable text that identifies the particular request that the task is performing. + * For example, it might identify the search request being performed by a search task. + * Other kinds of tasks have different descriptions, like `_reindex` which has the source and the destination, or `_bulk` which just has the number of requests and the destination indices. 
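// Editor's note — usage sketch, not part of this patch: the synonyms-set types above map
// onto `synonyms.putSynonym` roughly as follows; the set identifier and rules are
// placeholders. Rule IDs are generated by Elasticsearch when omitted.
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'https://localhost:9200/' })

async function updateSynonyms (): Promise<void> {
  const result = await client.synonyms.putSynonym({
    id: 'my-synonyms-set',
    synonyms_set: [
      { id: 'rule-1', synonyms: 'laptop, notebook' },
      { synonyms: 'tv => television' } // identifier generated automatically
    ]
  })
  // Updating a synonyms set reloads the analyzers that use it.
  console.log(result)
}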
+ * Many requests will have only an empty description because more detailed information about the request is not easily available or particularly helpful in identifying the request. */ description?: string headers: Record id: long @@ -24217,6 +34982,10 @@ export interface TasksTaskInfo { running_time?: Duration running_time_in_nanos: DurationValue start_time_in_millis: EpochTime + /** The internal status of the task, which varies from task to task. + * The format also varies. + * While the goal is to keep the status for a particular task consistent from version to version, this is not always possible because sometimes the implementation changes. + * Fields might be removed from the status for a particular request so any parsing you do of the status might break in minor releases. */ status?: any type: string parent_task_id?: TaskId @@ -24227,12 +34996,15 @@ export type TasksTaskInfos = TasksTaskInfo[] | Record + /** Either a flat list of tasks if `group_by` was set to `none`, or grouped by parents if + * `group_by` was set to `parents`. */ tasks?: TasksTaskInfos } export interface TasksCancelRequest extends RequestBase { -/** The task identifier. */ + /** The task identifier. */ task_id?: TaskId /** A comma-separated list or wildcard expression of actions that is used to limit the request. */ actions?: string | string[] @@ -24251,9 +35023,10 @@ export interface TasksCancelRequest extends RequestBase { export type TasksCancelResponse = TasksTaskListResponseBase export interface TasksGetRequest extends RequestBase { -/** The task identifier. */ + /** The task identifier. */ task_id: Id - /** The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ + /** The period to wait for a response. + * If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration /** If `true`, the request blocks until the task has completed. */ wait_for_completion?: boolean @@ -24271,17 +35044,24 @@ export interface TasksGetResponse { } export interface TasksListRequest extends RequestBase { -/** A comma-separated list or wildcard expression of actions used to limit the request. For example, you can use `cluser:*` to retrieve all cluster-related tasks. */ + /** A comma-separated list or wildcard expression of actions used to limit the request. + * For example, you can use `cluster:*` to retrieve all cluster-related tasks. */ actions?: string | string[] - /** If `true`, the response includes detailed information about the running tasks. This information is useful to distinguish tasks from each other but is more costly to run. */ + /** If `true`, the response includes detailed information about the running tasks. + * This information is useful to distinguish tasks from each other but is more costly to run. */ detailed?: boolean - /** A key that is used to group tasks in the response. The task lists can be grouped either by nodes or by parent tasks. */ + /** A key that is used to group tasks in the response. + * The task lists can be grouped either by nodes or by parent tasks. */ group_by?: TasksGroupBy /** A comma-separated list of node IDs or names that is used to limit the returned information. */ nodes?: NodeIds - /** A parent task identifier that is used to limit returned information. To return all tasks, omit this parameter or use a value of `-1`. If the parent task is not found, the API does not return a 404 response code.
*/ + /** A parent task identifier that is used to limit returned information. + * To return all tasks, omit this parameter or use a value of `-1`. + * If the parent task is not found, the API does not return a 404 response code. */ parent_task_id?: Id - /** The period to wait for each node to respond. If a node does not respond before its timeout expires, the response does not include its information. However, timed out nodes are included in the `node_failures` property. */ + /** The period to wait for each node to respond. + * If a node does not respond before its timeout expires, the response does not include its information. + * However, timed out nodes are included in the `node_failures` property. */ timeout?: Duration /** If `true`, the request blocks until the operation is complete. */ wait_for_completion?: boolean @@ -24315,33 +35095,98 @@ export interface TextStructureTopHit { } export interface TextStructureFindFieldStructureRequest extends RequestBase { -/** If `format` is set to `delimited`, you can specify the column names in a comma-separated list. If this parameter is not specified, the structure finder uses the column names from the header row of the text. If the text does not have a header row, columns are named "column1", "column2", "column3", for example. */ + /** If `format` is set to `delimited`, you can specify the column names in a comma-separated list. + * If this parameter is not specified, the structure finder uses the column names from the header row of the text. + * If the text does not have a header row, columns are named "column1", "column2", "column3", for example. */ column_names?: string - /** If you have set `format` to `delimited`, you can specify the character used to delimit the values in each row. Only a single character is supported; the delimiter cannot have multiple characters. By default, the API considers the following possibilities: comma, tab, semi-colon, and pipe (`|`). In this default scenario, all rows must have the same number of fields for the delimited format to be detected. If you specify a delimiter, up to 10% of the rows can have a different number of columns than the first row. */ + /** If you have set `format` to `delimited`, you can specify the character used to delimit the values in each row. + * Only a single character is supported; the delimiter cannot have multiple characters. + * By default, the API considers the following possibilities: comma, tab, semi-colon, and pipe (`|`). + * In this default scenario, all rows must have the same number of fields for the delimited format to be detected. + * If you specify a delimiter, up to 10% of the rows can have a different number of columns than the first row. */ delimiter?: string - /** The number of documents to include in the structural analysis. The minimum value is 2. */ + /** The number of documents to include in the structural analysis. + * The minimum value is 2. */ documents_to_sample?: uint - /** The mode of compatibility with ECS compliant Grok patterns. Use this parameter to specify whether to use ECS Grok patterns instead of legacy ones when the structure finder creates a Grok pattern. This setting primarily has an impact when a whole message Grok pattern such as `%{CATALINALOG}` matches the input. If the structure finder identifies a common structure but has no idea of the meaning then generic field names such as `path`, `ipaddress`, `field1`, and `field2` are used in the `grok_pattern` output. 
The intention in that situation is that a user who knows the meanings will rename the fields before using them. */ + /** The mode of compatibility with ECS compliant Grok patterns. + * Use this parameter to specify whether to use ECS Grok patterns instead of legacy ones when the structure finder creates a Grok pattern. + * This setting primarily has an impact when a whole message Grok pattern such as `%{CATALINALOG}` matches the input. + * If the structure finder identifies a common structure but has no idea of the meaning then generic field names such as `path`, `ipaddress`, `field1`, and `field2` are used in the `grok_pattern` output. + * The intention in that situation is that a user who knows the meanings will rename the fields before using them. */ ecs_compatibility?: TextStructureEcsCompatibilityType /** If `true`, the response includes a field named `explanation`, which is an array of strings that indicate how the structure finder produced its result. */ explain?: boolean /** The field that should be analyzed. */ field: Field - /** The high level structure of the text. By default, the API chooses the format. In this default scenario, all rows must have the same number of fields for a delimited format to be detected. If the format is set to delimited and the delimiter is not set, however, the API tolerates up to 5% of rows that have a different number of columns than the first row. */ + /** The high level structure of the text. + * By default, the API chooses the format. + * In this default scenario, all rows must have the same number of fields for a delimited format to be detected. + * If the format is set to delimited and the delimiter is not set, however, the API tolerates up to 5% of rows that have a different number of columns than the first row. */ format?: TextStructureFormatType - /** If the format is `semi_structured_text`, you can specify a Grok pattern that is used to extract fields from every message in the text. The name of the timestamp field in the Grok pattern must match what is specified in the `timestamp_field` parameter. If that parameter is not specified, the name of the timestamp field in the Grok pattern must match "timestamp". If `grok_pattern` is not specified, the structure finder creates a Grok pattern. */ + /** If the format is `semi_structured_text`, you can specify a Grok pattern that is used to extract fields from every message in the text. + * The name of the timestamp field in the Grok pattern must match what is specified in the `timestamp_field` parameter. + * If that parameter is not specified, the name of the timestamp field in the Grok pattern must match "timestamp". + * If `grok_pattern` is not specified, the structure finder creates a Grok pattern. */ grok_pattern?: GrokPattern /** The name of the index that contains the analyzed field. */ index: IndexName - /** If the format is `delimited`, you can specify the character used to quote the values in each row if they contain newlines or the delimiter character. Only a single character is supported. If this parameter is not specified, the default value is a double quote (`"`). If your delimited text format does not use quoting, a workaround is to set this argument to a character that does not appear anywhere in the sample. */ + /** If the format is `delimited`, you can specify the character used to quote the values in each row if they contain newlines or the delimiter character. + * Only a single character is supported. 
+ * If this parameter is not specified, the default value is a double quote (`"`). + * If your delimited text format does not use quoting, a workaround is to set this argument to a character that does not appear anywhere in the sample. */ quote?: string - /** If the format is `delimited`, you can specify whether values between delimiters should have whitespace trimmed from them. If this parameter is not specified and the delimiter is pipe (`|`), the default value is true. Otherwise, the default value is `false`. */ + /** If the format is `delimited`, you can specify whether values between delimiters should have whitespace trimmed from them. + * If this parameter is not specified and the delimiter is pipe (`|`), the default value is true. + * Otherwise, the default value is `false`. */ should_trim_fields?: boolean - /** The maximum amount of time that the structure analysis can take. If the analysis is still running when the timeout expires, it will be stopped. */ + /** The maximum amount of time that the structure analysis can take. + * If the analysis is still running when the timeout expires, it will be stopped. */ timeout?: Duration - /** The name of the field that contains the primary timestamp of each record in the text. In particular, if the text was ingested into an index, this is the field that would be used to populate the `@timestamp` field. If the format is `semi_structured_text`, this field must match the name of the appropriate extraction in the `grok_pattern`. Therefore, for semi-structured text, it is best not to specify this parameter unless `grok_pattern` is also specified. For structured text, if you specify this parameter, the field must exist within the text. If this parameter is not specified, the structure finder makes a decision about which field (if any) is the primary timestamp field. For structured text, it is not compulsory to have a timestamp in the text. */ + /** The name of the field that contains the primary timestamp of each record in the text. + * In particular, if the text was ingested into an index, this is the field that would be used to populate the `@timestamp` field. + * + * If the format is `semi_structured_text`, this field must match the name of the appropriate extraction in the `grok_pattern`. + * Therefore, for semi-structured text, it is best not to specify this parameter unless `grok_pattern` is also specified. + * + * For structured text, if you specify this parameter, the field must exist within the text. + * + * If this parameter is not specified, the structure finder makes a decision about which field (if any) is the primary timestamp field. + * For structured text, it is not compulsory to have a timestamp in the text. */ timestamp_field?: Field - /** The Java time format of the timestamp field in the text. Only a subset of Java time format letter groups are supported: * `a` * `d` * `dd` * `EEE` * `EEEE` * `H` * `HH` * `h` * `M` * `MM` * `MMM` * `MMMM` * `mm` * `ss` * `XX` * `XXX` * `yy` * `yyyy` * `zzz` Additionally `S` letter groups (fractional seconds) of length one to nine are supported providing they occur after `ss` and are separated from the `ss` by a period (`.`), comma (`,`), or colon (`:`). Spacing and punctuation is also permitted with the exception a question mark (`?`), newline, and carriage return, together with literal text enclosed in single quotes. For example, `MM/dd HH.mm.ss,SSSSSS 'in' yyyy` is a valid override format. 
One valuable use case for this parameter is when the format is semi-structured text, there are multiple timestamp formats in the text, and you know which format corresponds to the primary timestamp, but you do not want to specify the full `grok_pattern`. Another is when the timestamp format is one that the structure finder does not consider by default. If this parameter is not specified, the structure finder chooses the best format from a built-in set. If the special value `null` is specified, the structure finder will not look for a primary timestamp in the text. When the format is semi-structured text, this will result in the structure finder treating the text as single-line messages. */ + /** The Java time format of the timestamp field in the text. + * Only a subset of Java time format letter groups are supported: + * + * * `a` + * * `d` + * * `dd` + * * `EEE` + * * `EEEE` + * * `H` + * * `HH` + * * `h` + * * `M` + * * `MM` + * * `MMM` + * * `MMMM` + * * `mm` + * * `ss` + * * `XX` + * * `XXX` + * * `yy` + * * `yyyy` + * * `zzz` + * + * Additionally `S` letter groups (fractional seconds) of length one to nine are supported providing they occur after `ss` and are separated from the `ss` by a period (`.`), comma (`,`), or colon (`:`). + * Spacing and punctuation is also permitted with the exception of a question mark (`?`), newline, and carriage return, together with literal text enclosed in single quotes. + * For example, `MM/dd HH.mm.ss,SSSSSS 'in' yyyy` is a valid override format. + * + * One valuable use case for this parameter is when the format is semi-structured text, there are multiple timestamp formats in the text, and you know which format corresponds to the primary timestamp, but you do not want to specify the full `grok_pattern`. + * Another is when the timestamp format is one that the structure finder does not consider by default. + * + * If this parameter is not specified, the structure finder chooses the best format from a built-in set. + * + * If the special value `null` is specified, the structure finder will not look for a primary timestamp in the text. + * When the format is semi-structured text, this will result in the structure finder treating the text as single-line messages. */ timestamp_format?: string /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { column_names?: never, delimiter?: never, documents_to_sample?: never, ecs_compatibility?: never, explain?: never, field?: never, format?: never, grok_pattern?: never, index?: never, quote?: never, should_trim_fields?: never, timeout?: never, timestamp_field?: never, timestamp_format?: never } @@ -24368,27 +35213,90 @@ export interface TextStructureFindFieldStructureResponse { } export interface TextStructureFindMessageStructureRequest extends RequestBase { -/** If the format is `delimited`, you can specify the column names in a comma-separated list. If this parameter is not specified, the structure finder uses the column names from the header row of the text. If the text does not have a header role, columns are named "column1", "column2", "column3", for example. */ + /** If the format is `delimited`, you can specify the column names in a comma-separated list. + * If this parameter is not specified, the structure finder uses the column names from the header row of the text. + * If the text does not have a header row, columns are named "column1", "column2", "column3", for example.
*/ column_names?: string - /** If you the format is `delimited`, you can specify the character used to delimit the values in each row. Only a single character is supported; the delimiter cannot have multiple characters. By default, the API considers the following possibilities: comma, tab, semi-colon, and pipe (`|`). In this default scenario, all rows must have the same number of fields for the delimited format to be detected. If you specify a delimiter, up to 10% of the rows can have a different number of columns than the first row. */ + /** If the format is `delimited`, you can specify the character used to delimit the values in each row. + * Only a single character is supported; the delimiter cannot have multiple characters. + * By default, the API considers the following possibilities: comma, tab, semi-colon, and pipe (`|`). + * In this default scenario, all rows must have the same number of fields for the delimited format to be detected. + * If you specify a delimiter, up to 10% of the rows can have a different number of columns than the first row. */ delimiter?: string - /** The mode of compatibility with ECS compliant Grok patterns. Use this parameter to specify whether to use ECS Grok patterns instead of legacy ones when the structure finder creates a Grok pattern. This setting primarily has an impact when a whole message Grok pattern such as `%{CATALINALOG}` matches the input. If the structure finder identifies a common structure but has no idea of meaning then generic field names such as `path`, `ipaddress`, `field1`, and `field2` are used in the `grok_pattern` output, with the intention that a user who knows the meanings rename these fields before using it. */ + /** The mode of compatibility with ECS compliant Grok patterns. + * Use this parameter to specify whether to use ECS Grok patterns instead of legacy ones when the structure finder creates a Grok pattern. + * This setting primarily has an impact when a whole message Grok pattern such as `%{CATALINALOG}` matches the input. + * If the structure finder identifies a common structure but has no idea of meaning then generic field names such as `path`, `ipaddress`, `field1`, and `field2` are used in the `grok_pattern` output, with the intention that a user who knows the meanings rename these fields before using it. */ ecs_compatibility?: TextStructureEcsCompatibilityType /** If this parameter is set to true, the response includes a field named `explanation`, which is an array of strings that indicate how the structure finder produced its result. */ explain?: boolean - /** The high level structure of the text. By default, the API chooses the format. In this default scenario, all rows must have the same number of fields for a delimited format to be detected. If the format is `delimited` and the delimiter is not set, however, the API tolerates up to 5% of rows that have a different number of columns than the first row. */ + /** The high level structure of the text. + * By default, the API chooses the format. + * In this default scenario, all rows must have the same number of fields for a delimited format to be detected. + * If the format is `delimited` and the delimiter is not set, however, the API tolerates up to 5% of rows that have a different number of columns than the first row. */ format?: TextStructureFormatType - /** If the format is `semi_structured_text`, you can specify a Grok pattern that is used to extract fields from every message in the text.
The name of the timestamp field in the Grok pattern must match what is specified in the `timestamp_field` parameter. If that parameter is not specified, the name of the timestamp field in the Grok pattern must match "timestamp". If `grok_pattern` is not specified, the structure finder creates a Grok pattern. */ + /** If the format is `semi_structured_text`, you can specify a Grok pattern that is used to extract fields from every message in the text. + * The name of the timestamp field in the Grok pattern must match what is specified in the `timestamp_field` parameter. + * If that parameter is not specified, the name of the timestamp field in the Grok pattern must match "timestamp". + * If `grok_pattern` is not specified, the structure finder creates a Grok pattern. */ grok_pattern?: GrokPattern - /** If the format is `delimited`, you can specify the character used to quote the values in each row if they contain newlines or the delimiter character. Only a single character is supported. If this parameter is not specified, the default value is a double quote (`"`). If your delimited text format does not use quoting, a workaround is to set this argument to a character that does not appear anywhere in the sample. */ + /** If the format is `delimited`, you can specify the character used to quote the values in each row if they contain newlines or the delimiter character. + * Only a single character is supported. + * If this parameter is not specified, the default value is a double quote (`"`). + * If your delimited text format does not use quoting, a workaround is to set this argument to a character that does not appear anywhere in the sample. */ quote?: string - /** If the format is `delimited`, you can specify whether values between delimiters should have whitespace trimmed from them. If this parameter is not specified and the delimiter is pipe (`|`), the default value is true. Otherwise, the default value is `false`. */ + /** If the format is `delimited`, you can specify whether values between delimiters should have whitespace trimmed from them. + * If this parameter is not specified and the delimiter is pipe (`|`), the default value is true. + * Otherwise, the default value is `false`. */ should_trim_fields?: boolean - /** The maximum amount of time that the structure analysis can take. If the analysis is still running when the timeout expires, it will be stopped. */ + /** The maximum amount of time that the structure analysis can take. + * If the analysis is still running when the timeout expires, it will be stopped. */ timeout?: Duration - /** The name of the field that contains the primary timestamp of each record in the text. In particular, if the text was ingested into an index, this is the field that would be used to populate the `@timestamp` field. If the format is `semi_structured_text`, this field must match the name of the appropriate extraction in the `grok_pattern`. Therefore, for semi-structured text, it is best not to specify this parameter unless `grok_pattern` is also specified. For structured text, if you specify this parameter, the field must exist within the text. If this parameter is not specified, the structure finder makes a decision about which field (if any) is the primary timestamp field. For structured text, it is not compulsory to have a timestamp in the text. */ + /** The name of the field that contains the primary timestamp of each record in the text. 
+ * In particular, if the text was ingested into an index, this is the field that would be used to populate the `@timestamp` field. + * + * If the format is `semi_structured_text`, this field must match the name of the appropriate extraction in the `grok_pattern`. + * Therefore, for semi-structured text, it is best not to specify this parameter unless `grok_pattern` is also specified. + * + * For structured text, if you specify this parameter, the field must exist within the text. + * + * If this parameter is not specified, the structure finder makes a decision about which field (if any) is the primary timestamp field. + * For structured text, it is not compulsory to have a timestamp in the text. */ timestamp_field?: Field - /** The Java time format of the timestamp field in the text. Only a subset of Java time format letter groups are supported: * `a` * `d` * `dd` * `EEE` * `EEEE` * `H` * `HH` * `h` * `M` * `MM` * `MMM` * `MMMM` * `mm` * `ss` * `XX` * `XXX` * `yy` * `yyyy` * `zzz` Additionally `S` letter groups (fractional seconds) of length one to nine are supported providing they occur after `ss` and are separated from the `ss` by a period (`.`), comma (`,`), or colon (`:`). Spacing and punctuation is also permitted with the exception a question mark (`?`), newline, and carriage return, together with literal text enclosed in single quotes. For example, `MM/dd HH.mm.ss,SSSSSS 'in' yyyy` is a valid override format. One valuable use case for this parameter is when the format is semi-structured text, there are multiple timestamp formats in the text, and you know which format corresponds to the primary timestamp, but you do not want to specify the full `grok_pattern`. Another is when the timestamp format is one that the structure finder does not consider by default. If this parameter is not specified, the structure finder chooses the best format from a built-in set. If the special value `null` is specified, the structure finder will not look for a primary timestamp in the text. When the format is semi-structured text, this will result in the structure finder treating the text as single-line messages. */ + /** The Java time format of the timestamp field in the text. + * Only a subset of Java time format letter groups are supported: + * + * * `a` + * * `d` + * * `dd` + * * `EEE` + * * `EEEE` + * * `H` + * * `HH` + * * `h` + * * `M` + * * `MM` + * * `MMM` + * * `MMMM` + * * `mm` + * * `ss` + * * `XX` + * * `XXX` + * * `yy` + * * `yyyy` + * * `zzz` + * + * Additionally `S` letter groups (fractional seconds) of length one to nine are supported providing they occur after `ss` and are separated from the `ss` by a period (`.`), comma (`,`), or colon (`:`). + * Spacing and punctuation is also permitted with the exception of a question mark (`?`), newline, and carriage return, together with literal text enclosed in single quotes. + * For example, `MM/dd HH.mm.ss,SSSSSS 'in' yyyy` is a valid override format. + * + * One valuable use case for this parameter is when the format is semi-structured text, there are multiple timestamp formats in the text, and you know which format corresponds to the primary timestamp, but you do not want to specify the full `grok_pattern`. + * Another is when the timestamp format is one that the structure finder does not consider by default. + * + * If this parameter is not specified, the structure finder chooses the best format from a built-in set. + * + * If the special value `null` is specified, the structure finder will not look for a primary timestamp in the text.
+ * When the format is semi-structured text, this will result in the structure finder treating the text as single-line messages. */ timestamp_format?: string /** The list of messages you want to analyze. */ messages: string[] @@ -24417,35 +35325,113 @@ export interface TextStructureFindMessageStructureResponse { } export interface TextStructureFindStructureRequest { -/** The text's character set. It must be a character set that is supported by the JVM that Elasticsearch uses. For example, `UTF-8`, `UTF-16LE`, `windows-1252`, or `EUC-JP`. If this parameter is not specified, the structure finder chooses an appropriate character set. */ + /** The text's character set. + * It must be a character set that is supported by the JVM that Elasticsearch uses. + * For example, `UTF-8`, `UTF-16LE`, `windows-1252`, or `EUC-JP`. + * If this parameter is not specified, the structure finder chooses an appropriate character set. */ charset?: string - /** If you have set format to `delimited`, you can specify the column names in a comma-separated list. If this parameter is not specified, the structure finder uses the column names from the header row of the text. If the text does not have a header role, columns are named "column1", "column2", "column3", for example. */ + /** If you have set format to `delimited`, you can specify the column names in a comma-separated list. + * If this parameter is not specified, the structure finder uses the column names from the header row of the text. + * If the text does not have a header row, columns are named "column1", "column2", "column3", for example. */ column_names?: string - /** If you have set `format` to `delimited`, you can specify the character used to delimit the values in each row. Only a single character is supported; the delimiter cannot have multiple characters. By default, the API considers the following possibilities: comma, tab, semi-colon, and pipe (`|`). In this default scenario, all rows must have the same number of fields for the delimited format to be detected. If you specify a delimiter, up to 10% of the rows can have a different number of columns than the first row. */ + /** If you have set `format` to `delimited`, you can specify the character used to delimit the values in each row. + * Only a single character is supported; the delimiter cannot have multiple characters. + * By default, the API considers the following possibilities: comma, tab, semi-colon, and pipe (`|`). + * In this default scenario, all rows must have the same number of fields for the delimited format to be detected. + * If you specify a delimiter, up to 10% of the rows can have a different number of columns than the first row. */ delimiter?: string - /** The mode of compatibility with ECS compliant Grok patterns.
+ * Use this parameter to specify whether to use ECS Grok patterns instead of legacy ones when the structure finder creates a Grok pattern. + * Valid values are `disabled` and `v1`. + * This setting primarily has an impact when a whole message Grok pattern such as `%{CATALINALOG}` matches the input. + * If the structure finder identifies a common structure but has no idea of meaning then generic field names such as `path`, `ipaddress`, `field1`, and `field2` are used in the `grok_pattern` output, with the intention that a user who knows the meanings rename these fields before using it. */ ecs_compatibility?: string - /** If this parameter is set to `true`, the response includes a field named explanation, which is an array of strings that indicate how the structure finder produced its result. If the structure finder produces unexpected results for some text, use this query parameter to help you determine why the returned structure was chosen. */ + /** If this parameter is set to `true`, the response includes a field named explanation, which is an array of strings that indicate how the structure finder produced its result. + * If the structure finder produces unexpected results for some text, use this query parameter to help you determine why the returned structure was chosen. */ explain?: boolean - /** The high level structure of the text. Valid values are `ndjson`, `xml`, `delimited`, and `semi_structured_text`. By default, the API chooses the format. In this default scenario, all rows must have the same number of fields for a delimited format to be detected. If the format is set to `delimited` and the delimiter is not set, however, the API tolerates up to 5% of rows that have a different number of columns than the first row. */ + /** The high level structure of the text. + * Valid values are `ndjson`, `xml`, `delimited`, and `semi_structured_text`. + * By default, the API chooses the format. + * In this default scenario, all rows must have the same number of fields for a delimited format to be detected. + * If the format is set to `delimited` and the delimiter is not set, however, the API tolerates up to 5% of rows that have a different number of columns than the first row. */ format?: string - /** If you have set `format` to `semi_structured_text`, you can specify a Grok pattern that is used to extract fields from every message in the text. The name of the timestamp field in the Grok pattern must match what is specified in the `timestamp_field` parameter. If that parameter is not specified, the name of the timestamp field in the Grok pattern must match "timestamp". If `grok_pattern` is not specified, the structure finder creates a Grok pattern. */ + /** If you have set `format` to `semi_structured_text`, you can specify a Grok pattern that is used to extract fields from every message in the text. + * The name of the timestamp field in the Grok pattern must match what is specified in the `timestamp_field` parameter. + * If that parameter is not specified, the name of the timestamp field in the Grok pattern must match "timestamp". + * If `grok_pattern` is not specified, the structure finder creates a Grok pattern. */ grok_pattern?: GrokPattern - /** If you have set `format` to `delimited`, you can use this parameter to indicate whether the column names are in the first row of the text. If this parameter is not specified, the structure finder guesses based on the similarity of the first row of the text to other rows. 
*/ + /** If you have set `format` to `delimited`, you can use this parameter to indicate whether the column names are in the first row of the text. + * If this parameter is not specified, the structure finder guesses based on the similarity of the first row of the text to other rows. */ has_header_row?: boolean - /** The maximum number of characters in a message when lines are merged to form messages while analyzing semi-structured text. If you have extremely long messages you may need to increase this, but be aware that this may lead to very long processing times if the way to group lines into messages is misdetected. */ + /** The maximum number of characters in a message when lines are merged to form messages while analyzing semi-structured text. + * If you have extremely long messages you may need to increase this, but be aware that this may lead to very long processing times if the way to group lines into messages is misdetected. */ line_merge_size_limit?: uint - /** The number of lines to include in the structural analysis, starting from the beginning of the text. The minimum is 2. If the value of this parameter is greater than the number of lines in the text, the analysis proceeds (as long as there are at least two lines in the text) for all of the lines. NOTE: The number of lines and the variation of the lines affects the speed of the analysis. For example, if you upload text where the first 1000 lines are all variations on the same message, the analysis will find more commonality than would be seen with a bigger sample. If possible, however, it is more efficient to upload sample text with more variety in the first 1000 lines than to request analysis of 100000 lines to achieve some variety. */ + /** The number of lines to include in the structural analysis, starting from the beginning of the text. + * The minimum is 2. + * If the value of this parameter is greater than the number of lines in the text, the analysis proceeds (as long as there are at least two lines in the text) for all of the lines. + * + * NOTE: The number of lines and the variation of the lines affects the speed of the analysis. + * For example, if you upload text where the first 1000 lines are all variations on the same message, the analysis will find more commonality than would be seen with a bigger sample. + * If possible, however, it is more efficient to upload sample text with more variety in the first 1000 lines than to request analysis of 100000 lines to achieve some variety. */ lines_to_sample?: uint - /** If you have set `format` to `delimited`, you can specify the character used to quote the values in each row if they contain newlines or the delimiter character. Only a single character is supported. If this parameter is not specified, the default value is a double quote (`"`). If your delimited text format does not use quoting, a workaround is to set this argument to a character that does not appear anywhere in the sample. */ + /** If you have set `format` to `delimited`, you can specify the character used to quote the values in each row if they contain newlines or the delimiter character. + * Only a single character is supported. + * If this parameter is not specified, the default value is a double quote (`"`). + * If your delimited text format does not use quoting, a workaround is to set this argument to a character that does not appear anywhere in the sample. 
*/ quote?: string - /** If you have set `format` to `delimited`, you can specify whether values between delimiters should have whitespace trimmed from them. If this parameter is not specified and the delimiter is pipe (`|`), the default value is `true`. Otherwise, the default value is `false`. */ + /** If you have set `format` to `delimited`, you can specify whether values between delimiters should have whitespace trimmed from them. + * If this parameter is not specified and the delimiter is pipe (`|`), the default value is `true`. + * Otherwise, the default value is `false`. */ should_trim_fields?: boolean - /** The maximum amount of time that the structure analysis can take. If the analysis is still running when the timeout expires then it will be stopped. */ + /** The maximum amount of time that the structure analysis can take. + * If the analysis is still running when the timeout expires then it will be stopped. */ timeout?: Duration - /** The name of the field that contains the primary timestamp of each record in the text. In particular, if the text were ingested into an index, this is the field that would be used to populate the `@timestamp` field. If the `format` is `semi_structured_text`, this field must match the name of the appropriate extraction in the `grok_pattern`. Therefore, for semi-structured text, it is best not to specify this parameter unless `grok_pattern` is also specified. For structured text, if you specify this parameter, the field must exist within the text. If this parameter is not specified, the structure finder makes a decision about which field (if any) is the primary timestamp field. For structured text, it is not compulsory to have a timestamp in the text. */ + /** The name of the field that contains the primary timestamp of each record in the text. + * In particular, if the text were ingested into an index, this is the field that would be used to populate the `@timestamp` field. + * + * If the `format` is `semi_structured_text`, this field must match the name of the appropriate extraction in the `grok_pattern`. + * Therefore, for semi-structured text, it is best not to specify this parameter unless `grok_pattern` is also specified. + * + * For structured text, if you specify this parameter, the field must exist within the text. + * + * If this parameter is not specified, the structure finder makes a decision about which field (if any) is the primary timestamp field. + * For structured text, it is not compulsory to have a timestamp in the text. */ timestamp_field?: Field - /** The Java time format of the timestamp field in the text. Only a subset of Java time format letter groups are supported: * `a` * `d` * `dd` * `EEE` * `EEEE` * `H` * `HH` * `h` * `M` * `MM` * `MMM` * `MMMM` * `mm` * `ss` * `XX` * `XXX` * `yy` * `yyyy` * `zzz` Additionally `S` letter groups (fractional seconds) of length one to nine are supported providing they occur after `ss` and separated from the `ss` by a `.`, `,` or `:`. Spacing and punctuation is also permitted with the exception of `?`, newline and carriage return, together with literal text enclosed in single quotes. For example, `MM/dd HH.mm.ss,SSSSSS 'in' yyyy` is a valid override format. One valuable use case for this parameter is when the format is semi-structured text, there are multiple timestamp formats in the text, and you know which format corresponds to the primary timestamp, but you do not want to specify the full `grok_pattern`. 
Another is when the timestamp format is one that the structure finder does not consider by default. If this parameter is not specified, the structure finder chooses the best format from a built-in set. If the special value `null` is specified the structure finder will not look for a primary timestamp in the text. When the format is semi-structured text this will result in the structure finder treating the text as single-line messages. */ + /** The Java time format of the timestamp field in the text. + * + * Only a subset of Java time format letter groups are supported: + * + * * `a` + * * `d` + * * `dd` + * * `EEE` + * * `EEEE` + * * `H` + * * `HH` + * * `h` + * * `M` + * * `MM` + * * `MMM` + * * `MMMM` + * * `mm` + * * `ss` + * * `XX` + * * `XXX` + * * `yy` + * * `yyyy` + * * `zzz` + * + * Additionally `S` letter groups (fractional seconds) of length one to nine are supported providing they occur after `ss` and separated from the `ss` by a `.`, `,` or `:`. + * Spacing and punctuation is also permitted with the exception of `?`, newline and carriage return, together with literal text enclosed in single quotes. + * For example, `MM/dd HH.mm.ss,SSSSSS 'in' yyyy` is a valid override format. + * + * One valuable use case for this parameter is when the format is semi-structured text, there are multiple timestamp formats in the text, and you know which format corresponds to the primary timestamp, but you do not want to specify the full `grok_pattern`. + * Another is when the timestamp format is one that the structure finder does not consider by default. + * + * If this parameter is not specified, the structure finder chooses the best format from a built-in set. + * + * If the special value `null` is specified the structure finder will not look for a primary timestamp in the text. + * When the format is semi-structured text this will result in the structure finder treating the text as single-line messages. */ timestamp_format?: string text_files?: TJsonDocument[] /** All values in `body` will be added to the request body. */ @@ -24455,25 +35441,44 @@ export interface TextStructureFindStructureRequest { } export interface TextStructureFindStructureResponse { + /** The character encoding used to parse the text. */ charset: string has_header_row?: boolean + /** For UTF character encodings, it indicates whether the text begins with a byte order marker. */ has_byte_order_marker: boolean + /** Valid values include `ndjson`, `xml`, `delimited`, and `semi_structured_text`. */ format: string + /** The most common values of each field, plus basic numeric statistics for the numeric `page_count` field. + * This information may provide clues that the data needs to be cleaned or transformed prior to use by other Elastic Stack functionality. */ field_stats: Record + /** The first two messages in the text verbatim. + * This may help diagnose parse errors or accidental uploads of the wrong text. */ sample_start: string + /** The number of distinct messages the lines contained. + * For NDJSON, this value is the same as `num_lines_analyzed`. + * For other text formats, messages can span several lines. */ num_messages_analyzed: integer + /** Some suitable mappings for an index into which the data could be ingested. */ mappings: MappingTypeMapping quote?: string delimiter?: string + /** If a timestamp format is detected that does not include a timezone, `need_client_timezone` is `true`. + * The server that parses the text must therefore be told the correct timezone by the client. 
*/ need_client_timezone: boolean + /** The number of lines of the text that were analyzed. */ num_lines_analyzed: integer + /** If `format` is `delimited`, the `column_names` field lists the column names in the order they appear in the sample. */ column_names?: string[] explanation?: string[] grok_pattern?: GrokPattern multiline_start_pattern?: string exclude_lines_pattern?: string + /** The Java time formats recognized in the time fields. + * Elasticsearch mappings and ingest pipelines use this format. */ java_timestamp_formats?: string[] + /** Information that is used to tell Logstash how to parse timestamps. */ joda_timestamp_formats?: string[] + /** The field considered most likely to be the primary timestamp of each document. */ timestamp_field?: Field should_trim_fields?: boolean ingest_pipeline: IngestPipelineConfig @@ -24491,7 +35496,9 @@ export interface TextStructureTestGrokPatternMatchedText { } export interface TextStructureTestGrokPatternRequest extends RequestBase { -/** The mode of compatibility with ECS compliant Grok patterns. Use this parameter to specify whether to use ECS Grok patterns instead of legacy ones when the structure finder creates a Grok pattern. Valid values are `disabled` and `v1`. */ + /** The mode of compatibility with ECS compliant Grok patterns. + * Use this parameter to specify whether to use ECS Grok patterns instead of legacy ones when the structure finder creates a Grok pattern. + * Valid values are `disabled` and `v1`. */ ecs_compatibility?: string /** The Grok pattern to run on the text. */ grok_pattern: GrokPattern @@ -24508,18 +35515,35 @@ export interface TextStructureTestGrokPatternResponse { } export interface TransformDestination { + /** The destination index for the transform. The mappings of the destination index are deduced based on the source + * fields when possible. If alternate mappings are required, use the create index API prior to starting the + * transform. */ index?: IndexName + /** The unique identifier for an ingest pipeline. */ pipeline?: string } export interface TransformLatest { + /** Specifies the date field that is used to identify the latest documents. */ sort: Field + /** Specifies an array of one or more fields that are used to group the data. */ unique_key: Field[] } export interface TransformPivot { + /** Defines how to aggregate the grouped data. The following aggregations are currently supported: average, bucket + * script, bucket selector, cardinality, filter, geo bounds, geo centroid, geo line, max, median absolute deviation, + * min, missing, percentiles, rare terms, scripted metric, stats, sum, terms, top metrics, value count, weighted + * average. */ aggregations?: Record + /** Defines how to aggregate the grouped data. The following aggregations are currently supported: average, bucket + * script, bucket selector, cardinality, filter, geo bounds, geo centroid, geo line, max, median absolute deviation, + * min, missing, percentiles, rare terms, scripted metric, stats, sum, terms, top metrics, value count, weighted + * average. + * @alias aggregations */ aggs?: Record + /** Defines how to group the data. More than one grouping can be defined per pivot. The following groupings are + * currently supported: date histogram, geotile grid, histogram, terms. */ group_by?: Record } @@ -24531,44 +35555,77 @@ export interface TransformPivotGroupByContainer { } export interface TransformRetentionPolicy { + /** The date field that is used to calculate the age of the document. 
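As a rough illustration of the find-structure request and response documented above (the sample documents are invented; `text_files` follows the request interface shown in this diff):

import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

// Ask the structure finder to analyze a small NDJSON sample (TextStructureFindStructureRequest).
const structure = await client.textStructure.findStructure({
  lines_to_sample: 2,
  ecs_compatibility: 'v1',
  text_files: [
    { name: 'Leviathan Wakes', author: 'James S.A. Corey', release_date: '2011-06-02', page_count: 561 },
    { name: 'Hyperion', author: 'Dan Simmons', release_date: '1989-05-26', page_count: 482 }
  ]
})

// The response includes deduced mappings and a suggested ingest pipeline.
console.log(structure.mappings, structure.ingest_pipeline)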
*/ field: Field + /** Specifies the maximum age of a document in the destination index. Documents that are older than the configured + * value are removed from the destination index. */ max_age: Duration } export interface TransformRetentionPolicyContainer { + /** Specifies that the transform uses a time field to set the retention policy. */ time?: TransformRetentionPolicy } export interface TransformSettings { + /** Specifies whether the transform checkpoint ranges should be optimized for performance. Such optimization can align + * checkpoint ranges with the date histogram interval when date histogram is specified as a group source in the + * transform config. As a result, fewer document updates in the destination index will be performed, thus improving + * overall performance. */ align_checkpoints?: boolean + /** Defines if dates in the output should be written as ISO formatted string or as millis since epoch. epoch_millis was + * the default for transforms created before version 7.11. For compatible output set this value to `true`. */ dates_as_epoch_millis?: boolean + /** Specifies whether the transform should deduce the destination index mappings from the transform configuration. */ deduce_mappings?: boolean + /** Specifies a limit on the number of input documents per second. This setting throttles the transform by adding a + * wait time between search requests. The default value is null, which disables throttling. */ docs_per_second?: float + /** Defines the initial page size to use for the composite aggregation for each checkpoint. If circuit breaker + * exceptions occur, the page size is dynamically adjusted to a lower value. The minimum value is `10` and the + * maximum is `65,536`. */ max_page_search_size?: integer + /** If `true`, the transform runs in unattended mode. In unattended mode, the transform retries indefinitely in case + * of an error which means the transform never fails. Setting the number of retries other than infinite fails in + * validation. */ unattended?: boolean } export interface TransformSource { + /** The source indices for the transform. It can be a single index, an index pattern (for example, `"my-index-*"`), an + * array of indices (for example, `["my-index-000001", "my-index-000002"]`), or an array of index patterns (for + * example, `["my-index-*", "my-other-index-*"]`). For remote indices use the syntax `"remote_name:index_name"`. If + * any indices are in remote clusters then the master node and at least one transform node must have the `remote_cluster_client` node role. */ index: Indices + /** A query clause that retrieves a subset of data from the source index. */ query?: QueryDslQueryContainer + /** Definitions of search-time runtime fields that can be used by the transform. For search runtime fields all data + * nodes, including remote nodes, must be 7.12 or later. */ runtime_mappings?: MappingRuntimeFields } export interface TransformSyncContainer { + /** Specifies that the transform uses a time field to synchronize the source and destination indices. */ time?: TransformTimeSync } export interface TransformTimeSync { + /** The time delay between the current time and the latest input data time. */ delay?: Duration + /** The date field that is used to identify new documents in the source. In general, it’s a good idea to use a field + * that contains the ingest timestamp. If you use a different field, you might need to set the delay such that it + * accounts for data transmission delays.
*/ field: Field } export interface TransformDeleteTransformRequest extends RequestBase { -/** Identifier for the transform. */ + /** Identifier for the transform. */ transform_id: Id - /** If this value is false, the transform must be stopped before it can be deleted. If true, the transform is deleted regardless of its current state. */ + /** If this value is false, the transform must be stopped before it can be deleted. If true, the transform is + * deleted regardless of its current state. */ force?: boolean - /** If this value is true, the destination index is deleted together with the transform. If false, the destination index will not be deleted */ + /** If this value is true, the destination index is deleted together with the transform. If false, the destination + * index will not be deleted */ delete_dest_index?: boolean /** Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration @@ -24581,15 +35638,27 @@ export interface TransformDeleteTransformRequest extends RequestBase { export type TransformDeleteTransformResponse = AcknowledgedResponseBase export interface TransformGetTransformRequest extends RequestBase { -/** Identifier for the transform. It can be a transform identifier or a wildcard expression. You can get information for all transforms by using `_all`, by specifying `*` as the ``, or by omitting the ``. */ + /** Identifier for the transform. It can be a transform identifier or a + * wildcard expression. You can get information for all transforms by using + * `_all`, by specifying `*` as the ``, or by omitting the + * ``. */ transform_id?: Names - /** Specifies what to do when the request: 1. Contains wildcard expressions and there are no transforms that match. 2. Contains the _all string or no identifiers and there are no matches. 3. Contains wildcard expressions and there are only partial matches. If this parameter is false, the request returns a 404 status code when there are no matches or only partial matches. */ + /** Specifies what to do when the request: + * + * 1. Contains wildcard expressions and there are no transforms that match. + * 2. Contains the _all string or no identifiers and there are no matches. + * 3. Contains wildcard expressions and there are only partial matches. + * + * If this parameter is false, the request returns a 404 status code when + * there are no matches or only partial matches. */ allow_no_match?: boolean /** Skips the specified number of transforms. */ from?: integer /** Specifies the maximum number of transforms to obtain. */ size?: integer - /** Excludes fields that were automatically added when creating the transform. This allows the configuration to be in an acceptable format to be retrieved and then added to another cluster. */ + /** Excludes fields that were automatically added when creating the + * transform. This allows the configuration to be in an acceptable format to + * be retrieved and then added to another cluster. */ exclude_generated?: boolean /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { transform_id?: never, allow_no_match?: never, from?: never, size?: never, exclude_generated?: never } @@ -24603,18 +35672,28 @@ export interface TransformGetTransformResponse { } export interface TransformGetTransformTransformSummary { + /** The security privileges that the transform uses to run its queries. 
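The transform configuration types above compose into a single create request; a minimal sketch follows (the index names, group-by field, and aggregation are assumptions, not part of this diff):

import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200/' })

// Create and start a continuous pivot transform built from the types above:
// TransformSource, TransformDestination, TransformPivot, TransformSyncContainer, TransformRetentionPolicyContainer.
await client.transform.putTransform({
  transform_id: 'ecommerce-customer-spend',
  source: { index: 'kibana_sample_data_ecommerce' },
  dest: { index: 'ecommerce-customer-spend' },
  pivot: {
    group_by: { customer_id: { terms: { field: 'customer_id' } } },
    aggregations: { total_spend: { sum: { field: 'taxful_total_price' } } }
  },
  sync: { time: { field: 'order_date', delay: '60s' } },
  retention_policy: { time: { field: 'order_date', max_age: '30d' } },
  frequency: '5m'
})

await client.transform.startTransform({ transform_id: 'ecommerce-customer-spend' })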
If Elastic Stack security features were disabled at the time of the most recent update to the transform, this property is omitted. */ authorization?: MlTransformAuthorization + /** The time the transform was created. */ create_time?: EpochTime + create_time_string?: DateTime + /** Free text description of the transform. */ description?: string + /** The destination for the transform. */ dest: ReindexDestination frequency?: Duration id: Id latest?: TransformLatest + /** The pivot method transforms the data by aggregating and grouping it. */ pivot?: TransformPivot retention_policy?: TransformRetentionPolicyContainer + /** Defines optional transform settings. */ settings?: TransformSettings + /** The source of the data for the transform. */ source: TransformSource + /** Defines the properties transforms require to run continuously. */ sync?: TransformSyncContainer + /** The version of Elasticsearch that existed on the node when the transform was created. */ version?: VersionString _meta?: Metadata } @@ -24630,17 +35709,28 @@ export interface TransformGetTransformStatsCheckpointStats { export interface TransformGetTransformStatsCheckpointing { changes_last_detected_at?: long - changes_last_detected_at_date_time?: DateTime + changes_last_detected_at_string?: DateTime last: TransformGetTransformStatsCheckpointStats next?: TransformGetTransformStatsCheckpointStats operations_behind?: long last_search_time?: long + last_search_time_string?: DateTime } export interface TransformGetTransformStatsRequest extends RequestBase { -/** Identifier for the transform. It can be a transform identifier or a wildcard expression. You can get information for all transforms by using `_all`, by specifying `*` as the ``, or by omitting the ``. */ + /** Identifier for the transform. It can be a transform identifier or a + * wildcard expression. You can get information for all transforms by using + * `_all`, by specifying `*` as the ``, or by omitting the + * ``. */ transform_id: Names - /** Specifies what to do when the request: 1. Contains wildcard expressions and there are no transforms that match. 2. Contains the _all string or no identifiers and there are no matches. 3. Contains wildcard expressions and there are only partial matches. If this parameter is false, the request returns a 404 status code when there are no matches or only partial matches. */ + /** Specifies what to do when the request: + * + * 1. Contains wildcard expressions and there are no transforms that match. + * 2. Contains the _all string or no identifiers and there are no matches. + * 3. Contains wildcard expressions and there are only partial matches. + * + * If this parameter is false, the request returns a 404 status code when + * there are no matches or only partial matches. */ allow_no_match?: boolean /** Skips the specified number of transforms. 
*/ from?: long @@ -24659,6 +35749,20 @@ export interface TransformGetTransformStatsResponse { transforms: TransformGetTransformStatsTransformStats[] } +export interface TransformGetTransformStatsTransformHealthIssue { + /** The type of the issue */ + type: string + /** A description of the issue */ + issue: string + /** Details about the issue */ + details?: string + /** Number of times this issue has occurred since it started */ + count: integer + /** The timestamp this issue occurred for the first time */ + first_occurrence?: EpochTime + first_occurence_string?: DateTime +} + export interface TransformGetTransformStatsTransformIndexerStats { delete_time_in_ms?: EpochTime documents_indexed: long @@ -24691,6 +35795,7 @@ export interface TransformGetTransformStatsTransformStats { checkpointing: TransformGetTransformStatsCheckpointing health?: TransformGetTransformStatsTransformStatsHealth id: Id + /** @remarks This property is not supported on Elastic Cloud Serverless. */ node?: NodeAttributes reason?: string state: string @@ -24699,20 +35804,29 @@ export interface TransformGetTransformStatsTransformStats { export interface TransformGetTransformStatsTransformStatsHealth { status: HealthStatus + /** If a non-healthy status is returned, contains a list of issues of the transform. */ + issues?: TransformGetTransformStatsTransformHealthIssue[] } export interface TransformPreviewTransformRequest extends RequestBase { -/** Identifier for the transform to preview. If you specify this path parameter, you cannot provide transform configuration details in the request body. */ + /** Identifier for the transform to preview. If you specify this path parameter, you cannot provide transform + * configuration details in the request body. */ transform_id?: Id - /** Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ + /** Period to wait for a response. If no response is received before the + * timeout expires, the request fails and returns an error. */ timeout?: Duration /** The destination for the transform. */ dest?: TransformDestination /** Free text description of the transform. */ description?: string - /** The interval between checks for changes in the source indices when the transform is running continuously. Also determines the retry interval in the event of transient failures while the transform is searching or indexing. The minimum value is 1s and the maximum is 1h. */ + /** The interval between checks for changes in the source indices when the + * transform is running continuously. Also determines the retry interval in + * the event of transient failures while the transform is searching or + * indexing. The minimum value is 1s and the maximum is 1h. */ frequency?: Duration - /** The pivot method transforms the data by aggregating and grouping it. These objects define the group by fields and the aggregation to reduce the data. */ + /** The pivot method transforms the data by aggregating and grouping it. + * These objects define the group by fields and the aggregation to reduce + * the data. */ pivot?: TransformPivot /** The source of the data for the transform. */ source?: TransformSource @@ -24720,9 +35834,11 @@ export interface TransformPreviewTransformRequest extends RequestBase { settings?: TransformSettings /** Defines the properties transforms require to run continuously. */ sync?: TransformSyncContainer - /** Defines a retention policy for the transform.
Data that meets the defined criteria is deleted from the destination index. */ + /** Defines a retention policy for the transform. Data that meets the defined + * criteria is deleted from the destination index. */ retention_policy?: TransformRetentionPolicyContainer - /** The latest method transforms the data by finding the latest document for each unique key. */ + /** The latest method transforms the data by finding the latest document for + * each unique key. */ latest?: TransformLatest /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { transform_id?: never, timeout?: never, dest?: never, description?: never, frequency?: never, pivot?: never, source?: never, settings?: never, sync?: never, retention_policy?: never, latest?: never } @@ -24736,9 +35852,14 @@ export interface TransformPreviewTransformResponse { } export interface TransformPutTransformRequest extends RequestBase { -/** Identifier for the transform. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. It has a 64 character limit and must start and end with alphanumeric characters. */ + /** Identifier for the transform. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), + * hyphens, and underscores. It has a 64 character limit and must start and end with alphanumeric characters. */ transform_id: Id - /** When the transform is created, a series of validations occur to ensure its success. For example, there is a check for the existence of the source indices and a check that the destination index is not part of the source index pattern. You can use this parameter to skip the checks, for example when the source index does not exist until after the transform is created. The validations are always run when you start the transform, however, with the exception of privilege checks. */ + /** When the transform is created, a series of validations occur to ensure its success. For example, there is a + * check for the existence of the source indices and a check that the destination index is not part of the source + * index pattern. You can use this parameter to skip the checks, for example when the source index does not exist + * until after the transform is created. The validations are always run when you start the transform, however, with + * the exception of privilege checks. */ defer_validation?: boolean /** Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration @@ -24746,15 +35867,19 @@ export interface TransformPutTransformRequest extends RequestBase { dest: TransformDestination /** Free text description of the transform. */ description?: string - /** The interval between checks for changes in the source indices when the transform is running continuously. Also determines the retry interval in the event of transient failures while the transform is searching or indexing. The minimum value is `1s` and the maximum is `1h`. */ + /** The interval between checks for changes in the source indices when the transform is running continuously. Also + * determines the retry interval in the event of transient failures while the transform is searching or indexing. + * The minimum value is `1s` and the maximum is `1h`. */ frequency?: Duration /** The latest method transforms the data by finding the latest document for each unique key. */ latest?: TransformLatest /** Defines optional transform metadata. 
*/ _meta?: Metadata - /** The pivot method transforms the data by aggregating and grouping it. These objects define the group by fields and the aggregation to reduce the data. */ + /** The pivot method transforms the data by aggregating and grouping it. These objects define the group by fields + * and the aggregation to reduce the data. */ pivot?: TransformPivot - /** Defines a retention policy for the transform. Data that meets the defined criteria is deleted from the destination index. */ + /** Defines a retention policy for the transform. Data that meets the defined criteria is deleted from the + * destination index. */ retention_policy?: TransformRetentionPolicyContainer /** Defines optional transform settings. */ settings?: TransformSettings @@ -24771,9 +35896,11 @@ export interface TransformPutTransformRequest extends RequestBase { export type TransformPutTransformResponse = AcknowledgedResponseBase export interface TransformResetTransformRequest extends RequestBase { -/** Identifier for the transform. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. It has a 64 character limit and must start and end with alphanumeric characters. */ + /** Identifier for the transform. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), + * hyphens, and underscores. It has a 64 character limit and must start and end with alphanumeric characters. */ transform_id: Id - /** If this value is `true`, the transform is reset regardless of its current state. If it's `false`, the transform must be stopped before it can be reset. */ + /** If this value is `true`, the transform is reset regardless of its current state. If it's `false`, the transform + * must be stopped before it can be reset. */ force?: boolean /** Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration @@ -24786,7 +35913,7 @@ export interface TransformResetTransformRequest extends RequestBase { export type TransformResetTransformResponse = AcknowledgedResponseBase export interface TransformScheduleNowTransformRequest extends RequestBase { -/** Identifier for the transform. */ + /** Identifier for the transform. */ transform_id: Id /** Controls the time to wait for the scheduling to take place */ timeout?: Duration @@ -24799,7 +35926,7 @@ export interface TransformScheduleNowTransformRequest extends RequestBase { export type TransformScheduleNowTransformResponse = AcknowledgedResponseBase export interface TransformStartTransformRequest extends RequestBase { -/** Identifier for the transform. */ + /** Identifier for the transform. */ transform_id: Id /** Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration @@ -24814,17 +35941,29 @@ export interface TransformStartTransformRequest extends RequestBase { export type TransformStartTransformResponse = AcknowledgedResponseBase export interface TransformStopTransformRequest extends RequestBase { -/** Identifier for the transform. To stop multiple transforms, use a comma-separated list or a wildcard expression. To stop all transforms, use `_all` or `*` as the identifier. */ + /** Identifier for the transform. To stop multiple transforms, use a comma-separated list or a wildcard expression. + * To stop all transforms, use `_all` or `*` as the identifier. 
*/ transform_id: Name - /** Specifies what to do when the request: contains wildcard expressions and there are no transforms that match; contains the `_all` string or no identifiers and there are no matches; contains wildcard expressions and there are only partial matches. If it is true, the API returns a successful acknowledgement message when there are no matches. When there are only partial matches, the API stops the appropriate transforms. If it is false, the request returns a 404 status code when there are no matches or only partial matches. */ + /** Specifies what to do when the request: contains wildcard expressions and there are no transforms that match; + * contains the `_all` string or no identifiers and there are no matches; contains wildcard expressions and there + * are only partial matches. + * + * If it is true, the API returns a successful acknowledgement message when there are no matches. When there are + * only partial matches, the API stops the appropriate transforms. + * + * If it is false, the request returns a 404 status code when there are no matches or only partial matches. */ allow_no_match?: boolean /** If it is true, the API forcefully stops the transforms. */ force?: boolean - /** Period to wait for a response when `wait_for_completion` is `true`. If no response is received before the timeout expires, the request returns a timeout exception. However, the request continues processing and eventually moves the transform to a STOPPED state. */ + /** Period to wait for a response when `wait_for_completion` is `true`. If no response is received before the + * timeout expires, the request returns a timeout exception. However, the request continues processing and + * eventually moves the transform to a STOPPED state. */ timeout?: Duration - /** If it is true, the transform does not completely stop until the current checkpoint is completed. If it is false, the transform stops as soon as possible. */ + /** If it is true, the transform does not completely stop until the current checkpoint is completed. If it is false, + * the transform stops as soon as possible. */ wait_for_checkpoint?: boolean - /** If it is true, the API blocks until the indexer state completely stops. If it is false, the API returns immediately and the indexer is stopped asynchronously in the background. */ + /** If it is true, the API blocks until the indexer state completely stops. If it is false, the API returns + * immediately and the indexer is stopped asynchronously in the background. */ wait_for_completion?: boolean /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { transform_id?: never, allow_no_match?: never, force?: never, timeout?: never, wait_for_checkpoint?: never, wait_for_completion?: never } @@ -24835,17 +35974,23 @@ export interface TransformStopTransformRequest extends RequestBase { export type TransformStopTransformResponse = AcknowledgedResponseBase export interface TransformUpdateTransformRequest extends RequestBase { -/** Identifier for the transform. */ + /** Identifier for the transform. */ transform_id: Id - /** When true, deferrable validations are not run. This behavior may be desired if the source index does not exist until after the transform is created. */ + /** When true, deferrable validations are not run. This behavior may be + * desired if the source index does not exist until after the transform is + * created. */ defer_validation?: boolean - /** Period to wait for a response. 
If no response is received before the timeout expires, the request fails and returns an error. */ + /** Period to wait for a response. If no response is received before the + * timeout expires, the request fails and returns an error. */ timeout?: Duration /** The destination for the transform. */ dest?: TransformDestination /** Free text description of the transform. */ description?: string - /** The interval between checks for changes in the source indices when the transform is running continuously. Also determines the retry interval in the event of transient failures while the transform is searching or indexing. The minimum value is 1s and the maximum is 1h. */ + /** The interval between checks for changes in the source indices when the + * transform is running continuously. Also determines the retry interval in + * the event of transient failures while the transform is searching or + * indexing. The minimum value is 1s and the maximum is 1h. */ frequency?: Duration /** Defines optional transform metadata. */ _meta?: Metadata @@ -24855,7 +36000,8 @@ export interface TransformUpdateTransformRequest extends RequestBase { settings?: TransformSettings /** Defines the properties transforms require to run continuously. */ sync?: TransformSyncContainer - /** Defines a retention policy for the transform. Data that meets the defined criteria is deleted from the destination index. */ + /** Defines a retention policy for the transform. Data that meets the defined + * criteria is deleted from the destination index. */ retention_policy?: TransformRetentionPolicyContainer | null /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { transform_id?: never, defer_validation?: never, timeout?: never, dest?: never, description?: never, frequency?: never, _meta?: never, source?: never, settings?: never, sync?: never, retention_policy?: never } @@ -24881,9 +36027,10 @@ export interface TransformUpdateTransformResponse { } export interface TransformUpgradeTransformsRequest extends RequestBase { -/** When true, the request checks for updates but does not run them. */ + /** When true, the request checks for updates but does not run them. */ dry_run?: boolean - /** Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ + /** Period to wait for a response. If no response is received before the timeout expires, the request fails and + * returns an error. */ timeout?: Duration /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { dry_run?: never, timeout?: never } @@ -24892,8 +36039,11 @@ export interface TransformUpgradeTransformsRequest extends RequestBase { } export interface TransformUpgradeTransformsResponse { + /** The number of transforms that need to be upgraded. */ needs_update: integer + /** The number of transforms that don’t require upgrading. */ no_action: integer + /** The number of transforms that have been upgraded. */ updated: integer } @@ -25074,7 +36224,9 @@ export interface WatcherExecutionState { export type WatcherExecutionStatus = 'awaits_execution' | 'checking' | 'execution_not_needed' | 'throttled' | 'executed' | 'failed' | 'deleted_while_queued' | 'not_executed_already_queued' export interface WatcherExecutionThreadPool { + /** The largest size of the execution thread pool, which indicates the largest number of concurrent running watches. 
*/ max_size: long + /** The number of watches that were triggered and are currently queued. */ queue_size: long } @@ -25202,6 +36354,7 @@ export interface WatcherPagerDutyEvent { client?: string client_url?: string contexts?: WatcherPagerDutyContext[] + /** @alias contexts */ context?: WatcherPagerDutyContext[] description: string event_type?: WatcherPagerDutyEventType @@ -25262,9 +36415,9 @@ export interface WatcherScheduleTriggerEvent { } export interface WatcherScriptCondition { - lang?: string + lang?: ScriptLanguage params?: Record - source?: string + source?: ScriptSource id?: string } @@ -25289,9 +36442,14 @@ export interface WatcherSearchInputRequestDefinition { export interface WatcherSearchTemplateRequestBody { explain?: boolean + /** ID of the search template to use. If no source is specified, + * this parameter is required. */ id?: Id params?: Record profile?: boolean + /** An inline search template. Supports the same parameters as the search API's + * request body. Also supports Mustache variables. If no id is specified, this + * parameter is required. */ source?: string } @@ -25414,9 +36572,10 @@ export interface WatcherWebhookResult { } export interface WatcherAckWatchRequest extends RequestBase { -/** The watch identifier. */ + /** The watch identifier. */ watch_id: Name - /** A comma-separated list of the action identifiers to acknowledge. If you omit this parameter, all of the actions of the watch are acknowledged. */ + /** A comma-separated list of the action identifiers to acknowledge. + * If you omit this parameter, all of the actions of the watch are acknowledged. */ action_id?: Names /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { watch_id?: never, action_id?: never } @@ -25429,7 +36588,7 @@ export interface WatcherAckWatchResponse { } export interface WatcherActivateWatchRequest extends RequestBase { -/** The watch identifier. */ + /** The watch identifier. */ watch_id: Name /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { watch_id?: never } @@ -25442,7 +36601,7 @@ export interface WatcherActivateWatchResponse { } export interface WatcherDeactivateWatchRequest extends RequestBase { -/** The watch identifier. */ + /** The watch identifier. */ watch_id: Name /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { watch_id?: never } @@ -25455,7 +36614,7 @@ export interface WatcherDeactivateWatchResponse { } export interface WatcherDeleteWatchRequest extends RequestBase { -/** The watch identifier. */ + /** The watch identifier. */ id: Name /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { id?: never } @@ -25470,7 +36629,7 @@ export interface WatcherDeleteWatchResponse { } export interface WatcherExecuteWatchRequest extends RequestBase { -/** The watch identifier. */ + /** The watch identifier. */ id?: Id /** Defines whether the watch runs in debug mode. */ debug?: boolean @@ -25480,12 +36639,15 @@ export interface WatcherExecuteWatchRequest extends RequestBase { alternative_input?: Record /** When set to `true`, the watch execution uses the always condition. This can also be specified as an HTTP parameter. */ ignore_condition?: boolean - /** When set to `true`, the watch record representing the watch execution result is persisted to the `.watcher-history` index for the current time. 
In addition, the status of the watch is updated, possibly throttling subsequent runs. This can also be specified as an HTTP parameter. */ + /** When set to `true`, the watch record representing the watch execution result is persisted to the `.watcher-history` index for the current time. + * In addition, the status of the watch is updated, possibly throttling subsequent runs. + * This can also be specified as an HTTP parameter. */ record_execution?: boolean simulated_actions?: WatcherSimulatedActions /** This structure is parsed as the data of the trigger event that will be used during the watch execution. */ trigger_data?: WatcherScheduleTriggerEvent - /** When present, this watch is used instead of the one specified in the request. This watch is not persisted to the index and `record_execution` cannot be set. */ + /** When present, this watch is used instead of the one specified in the request. + * This watch is not persisted to the index and `record_execution` cannot be set. */ watch?: WatcherWatch /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { id?: never, debug?: never, action_modes?: never, alternative_input?: never, ignore_condition?: never, record_execution?: never, simulated_actions?: never, trigger_data?: never, watch?: never } @@ -25494,7 +36656,9 @@ export interface WatcherExecuteWatchRequest extends RequestBase { } export interface WatcherExecuteWatchResponse { + /** The watch record identifier as it would be stored in the `.watcher-history` index. */ _id: Id + /** The watch record document as it would be stored in the `.watcher-history` index. */ watch_record: WatcherExecuteWatchWatchRecord } @@ -25513,7 +36677,8 @@ export interface WatcherExecuteWatchWatchRecord { } export interface WatcherGetSettingsRequest extends RequestBase { -/** The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ + /** The period to wait for a connection to the master node. + * If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { master_timeout?: never } @@ -25526,7 +36691,7 @@ export interface WatcherGetSettingsResponse { } export interface WatcherGetWatchRequest extends RequestBase { -/** The watch identifier. */ + /** The watch identifier. */ id: Name /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { id?: never } @@ -25545,9 +36710,10 @@ export interface WatcherGetWatchResponse { } export interface WatcherPutWatchRequest extends RequestBase { -/** The identifier for the watch. */ + /** The identifier for the watch. */ id: Id - /** The initial state of the watch. The default value is `true`, which means the watch is active by default. */ + /** The initial state of the watch. + * The default value is `true`, which means the watch is active by default. */ active?: boolean /** only update the watch if the last operation that has changed the watch has the specified primary term */ if_primary_term?: long @@ -25563,7 +36729,10 @@ export interface WatcherPutWatchRequest extends RequestBase { input?: WatcherInputContainer /** Metadata JSON that will be copied into the history entries. */ metadata?: Metadata - /** The minimum time between actions being run. The default is 5 seconds. 
This default can be changed in the config file with the setting `xpack.watcher.throttle.period.default_period`. If both this value and the `throttle_period_in_millis` parameter are specified, Watcher uses the last parameter included in the request. */ + /** The minimum time between actions being run. + * The default is 5 seconds. + * This default can be changed in the config file with the setting `xpack.watcher.throttle.period.default_period`. + * If both this value and the `throttle_period_in_millis` parameter are specified, Watcher uses the last parameter included in the request. */ throttle_period?: Duration /** Minimum time in milliseconds between actions being run. Defaults to 5000. If both this value and the throttle_period parameter are specified, Watcher uses the last parameter included in the request. */ throttle_period_in_millis?: DurationValue @@ -25586,9 +36755,11 @@ export interface WatcherPutWatchResponse { } export interface WatcherQueryWatchesRequest extends RequestBase { -/** The offset from the first result to fetch. It must be non-negative. */ + /** The offset from the first result to fetch. + * It must be non-negative. */ from?: integer - /** The number of hits to return. It must be non-negative. */ + /** The number of hits to return. + * It must be non-negative. */ size?: integer /** A query that filters the watches to be returned. */ query?: QueryDslQueryContainer @@ -25603,12 +36774,14 @@ export interface WatcherQueryWatchesRequest extends RequestBase { } export interface WatcherQueryWatchesResponse { + /** The total number of watches found. */ count: integer + /** A list of watches based on the `from`, `size`, or `search_after` request body parameters. */ watches: WatcherQueryWatch[] } export interface WatcherStartRequest extends RequestBase { -/** Period to wait for a connection to the master node. */ + /** Period to wait for a connection to the master node. */ master_timeout?: Duration /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { master_timeout?: never } @@ -25619,7 +36792,7 @@ export interface WatcherStartRequest extends RequestBase { export type WatcherStartResponse = AcknowledgedResponseBase export interface WatcherStatsRequest extends RequestBase { -/** Defines which additional metrics are included in the response. */ + /** Defines which additional metrics are included in the response. */ metric?: WatcherStatsWatcherMetric | WatcherStatsWatcherMetric[] /** Defines whether stack traces are generated for each watch that is running. */ emit_stacktraces?: boolean @@ -25637,24 +36810,42 @@ export interface WatcherStatsResponse { } export interface WatcherStatsWatchRecordQueuedStats { + /** The time the watch was run. + * This is just before the input is being run. */ execution_time: DateTime } export interface WatcherStatsWatchRecordStats extends WatcherStatsWatchRecordQueuedStats { + /** The current watch execution phase. */ execution_phase: WatcherExecutionPhase + /** The time the watch was triggered by the trigger engine. */ triggered_time: DateTime executed_actions?: string[] watch_id: Id + /** The watch record identifier. */ watch_record_id: Id } export type WatcherStatsWatcherMetric = '_all' | 'all' | 'queued_watches' | 'current_watches' | 'pending_watches' export interface WatcherStatsWatcherNodeStats { + /** The current executing watches metric gives insight into the watches that are currently being executed by Watcher. + * Additional information is shared per watch that is currently executing. 
+ * This information includes the `watch_id`, the time its execution started and its current execution phase. + * To include this metric, the `metric` option should be set to `current_watches` or `_all`. + * In addition, you can also specify the `emit_stacktraces=true` parameter, which adds stack traces for each watch that is being run. + * These stack traces can give you more insight into an execution of a watch. */ current_watches?: WatcherStatsWatchRecordStats[] execution_thread_pool: WatcherExecutionThreadPool + /** Watcher moderates the execution of watches such that their execution won't put too much pressure on the node and its resources. + * If too many watches trigger concurrently and there isn't enough capacity to run them all, some of the watches are queued, waiting for the currently running watches to finish. + * The queued watches metric gives insight into these queued watches. + * + * To include this metric, the `metric` option should include `queued_watches` or `_all`. */ queued_watches?: WatcherStatsWatchRecordQueuedStats[] + /** The number of watches currently registered. */ watch_count: long + /** The current state of Watcher. */ watcher_state: WatcherStatsWatcherState node_id: Id } @@ -25662,7 +36853,9 @@ export interface WatcherStatsWatcherNodeStats { export type WatcherStatsWatcherState = 'stopped' | 'starting' | 'started' | 'stopping' export interface WatcherStopRequest extends RequestBase { -/** The period to wait for the master node. If the master node is not available before the timeout expires, the request fails and returns an error. To indicate that the request should never timeout, set it to `-1`. */ + /** The period to wait for the master node. + * If the master node is not available before the timeout expires, the request fails and returns an error. + * To indicate that the request should never timeout, set it to `-1`. */ master_timeout?: Duration /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { master_timeout?: never } @@ -25673,9 +36866,11 @@ export interface WatcherStopRequest extends RequestBase { export type WatcherStopResponse = AcknowledgedResponseBase export interface WatcherUpdateSettingsRequest extends RequestBase { -/** The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. */ + /** The period to wait for a connection to the master node. + * If no response is received before the timeout expires, the request fails and returns an error. */ master_timeout?: Duration - /** The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. */ + /** The period to wait for a response. + * If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration 'index.auto_expand_replicas'?: string 'index.number_of_replicas'?: integer @@ -25708,8 +36903,10 @@ export interface XpackInfoFeatures { data_streams: XpackInfoFeature data_tiers: XpackInfoFeature enrich: XpackInfoFeature + /** @remarks This property is not supported on Elastic Cloud Serverless. */ enterprise_search: XpackInfoFeature eql: XpackInfoFeature + /** @remarks This property is not supported on Elastic Cloud Serverless.
*/ esql: XpackInfoFeature graph: XpackInfoFeature ilm: XpackInfoFeature @@ -25725,9 +36922,11 @@ export interface XpackInfoFeatures { spatial: XpackInfoFeature sql: XpackInfoFeature transform: XpackInfoFeature + /** @remarks This property is not supported on Elastic Cloud Serverless. */ universal_profiling: XpackInfoFeature voting_only: XpackInfoFeature watcher: XpackInfoFeature + /** @remarks This property is not supported on Elastic Cloud Serverless. */ archive: XpackInfoFeature } @@ -25745,11 +36944,13 @@ export interface XpackInfoNativeCodeInformation { } export interface XpackInfoRequest extends RequestBase { -/** A comma-separated list of the information categories to include in the response. For example, `build,license,features`. */ + /** A comma-separated list of the information categories to include in the response. + * For example, `build,license,features`. */ categories?: XpackInfoXPackCategory[] /** If this param is used it must be set to true */ accept_enterprise?: boolean - /** Defines whether additional human-readable information is included in the response. In particular, it adds descriptions and a tag line. */ + /** Defines whether additional human-readable information is included in the response. + * In particular, it adds descriptions and a tag line. */ human?: boolean /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { categories?: never, accept_enterprise?: never, human?: never } @@ -25921,6 +37122,7 @@ export interface XpackUsageJobUsage { export interface XpackUsageMachineLearning extends XpackUsageBase { datafeeds: Record + /** Job usage statistics. The `_all` entry is always present and gathers statistics for all jobs. */ jobs: Record node_count: integer data_frame_analytics_jobs: XpackUsageMlDataFrameAnalyticsJobs @@ -26047,7 +37249,9 @@ export interface XpackUsageRealmCache { } export interface XpackUsageRequest extends RequestBase { -/** The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. To indicate that the request should never timeout, set it to `-1`. */ + /** The period to wait for a connection to the master node. + * If no response is received before the timeout expires, the request fails and returns an error. + * To indicate that the request should never timeout, set it to `-1`. */ master_timeout?: Duration /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { master_timeout?: never } @@ -26221,15 +37425,31 @@ export interface SpecUtilsAdditionalProperty { } export interface SpecUtilsCommonQueryParameters { + /** When set to `true` Elasticsearch will include the full stack trace of errors + * when they occur. */ error_trace?: boolean + /** Comma-separated list of filters in dot notation which reduce the response + * returned by Elasticsearch. */ filter_path?: string | string[] + /** When set to `true` will return statistics in a format suitable for humans. + * For example `"exists_time": "1h"` for humans and + * `"exists_time_in_millis": 3600000` for computers. When disabled the human + * readable values will be omitted. This makes sense for responses being consumed + * only by machines. */ human?: boolean + /** If set to `true` the returned JSON will be "pretty-formatted". Only use + * this option for debugging.
*/ pretty?: boolean } export interface SpecUtilsCommonCatQueryParameters { + /** Specifies the format to return the columnar data in, can be set to + * `text`, `json`, `cbor`, `yaml`, or `smile`. */ format?: string + /** When set to `true` will output available columns. This option + * can't be combined with any other query string option. */ help?: boolean + /** When set to `true` will enable verbose output. */ v?: boolean } diff --git a/src/client.ts b/src/client.ts index 7f9f8fabe..f4cedef16 100644 --- a/src/client.ts +++ b/src/client.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License") you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 */ import process from 'node:process' @@ -24,6 +10,7 @@ import buffer from 'node:buffer' import os from 'node:os' import { Transport, + TransportOptions, UndiciConnection, WeightedConnectionPool, CloudConnectionPool, @@ -68,6 +55,8 @@ if (transportVersion.includes('-')) { } const nodeVersion = process.versions.node +const serverlessApiVersion = '2023-10-31' + export interface NodeOptions { /** @property url Elasticsearch node's location */ url: URL @@ -194,6 +183,9 @@ export interface ClientOptions { * @remarks Read https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/advanced-config.html#redaction for more details * @defaultValue Configuration that will replace known sources of sensitive data */ redaction?: RedactionOptions + /** @property serverMode Setting to "serverless" will change some default behavior, like enabling compression and disabling features that assume the possibility of multiple Elasticsearch nodes. + * @defaultValue "stack", which sets defaults for a traditional (non-serverless) Elasticsearch instance. 
*/ + serverMode?: 'stack' | 'serverless' } export default class Client extends API { @@ -206,15 +198,40 @@ export default class Client extends API { constructor (opts: ClientOptions) { super() + // @ts-expect-error kChild symbol is for internal use only - if ((opts.cloud != null) && opts[kChild] === undefined) { - const { id } = opts.cloud - // the cloud id is `cluster-name:base64encodedurl` - // the url is a string divided by two '$', the first is the cloud url - // the second the elasticsearch instance, the third the kibana instance - const cloudUrls = Buffer.from(id.split(':')[1], 'base64').toString().split('$') + if ((opts.cloud != null || opts.serverMode === 'serverless') && opts[kChild] === undefined) { + if (opts.cloud != null) { + const { id } = opts.cloud + if (typeof id !== 'string') { + throw new errors.ConfigurationError('Cloud ID must be a string.') + } - opts.node = `https://${cloudUrls[1]}.${cloudUrls[0]}` + const parts = id.split(':') + if (parts.length !== 2 || parts[1] === '') { + throw new errors.ConfigurationError( + 'Cloud ID must be in the format "name:base64string".' + ) + } + + // the cloud id is `cluster-name:base64encodedurl` + // the url is a string divided by two '$', the first is the cloud url + // the second the elasticsearch instance, the third the kibana instance + + let cloudUrls + try { + cloudUrls = Buffer.from(parts[1], 'base64').toString().split('$') + } catch (err) { + throw new errors.ConfigurationError('Cloud ID base64 decoding failed.') + } + if (cloudUrls.length < 2 || cloudUrls[0] === '' || cloudUrls[1] === '') { + throw new errors.ConfigurationError( + 'Cloud ID base64 must contain at least two "$" separated parts: "$[$]".' + ) + } + + opts.node = `https://${cloudUrls[1]}.${cloudUrls[0]}` + } // Cloud has better performance with compression enabled // see https://github.com/elastic/elasticsearch-py/pull/704. @@ -239,11 +256,16 @@ export default class Client extends API { } } + const headers: Record = { + 'user-agent': `elasticsearch-js/${clientVersion} (${os.platform()} ${os.release()}-${os.arch()}; Node.js ${nodeVersion}; Transport ${transportVersion})` + } + if (opts.serverMode === 'serverless') headers['elastic-api-version'] = serverlessApiVersion + const options: Required = Object.assign({}, { Connection: UndiciConnection, - Transport: SniffingTransport, + Transport: opts.serverMode === 'serverless' ? Transport : SniffingTransport, Serializer, - ConnectionPool: (opts.cloud != null) ? CloudConnectionPool : WeightedConnectionPool, + ConnectionPool: (opts.cloud != null || opts.serverMode === 'serverless') ? CloudConnectionPool : WeightedConnectionPool, maxRetries: 3, pingTimeout: 3000, sniffInterval: false, @@ -255,9 +277,7 @@ export default class Client extends API { tls: null, caFingerprint: null, agent: null, - headers: { - 'user-agent': `elasticsearch-js/${clientVersion} (${os.platform()} ${os.release()}-${os.arch()}; Node.js ${nodeVersion}; Transport ${transportVersion})` - }, + headers, nodeFilter: null, generateRequestId: null, name: 'elasticsearch-js', @@ -271,7 +291,8 @@ export default class Client extends API { redaction: { type: 'replace', additionalKeys: [] - } + }, + serverMode: 'stack' }, opts) if (options.caFingerprint != null && isHttpConnection(opts.node ?? opts.nodes)) { @@ -340,7 +361,13 @@ export default class Client extends API { // ensure default connection values are inherited when creating new connections // see https://github.com/elastic/elasticsearch-js/issues/1791 - const nodes = options.node ?? 
options.nodes + let nodes = options.node ?? options.nodes + + // serverless only supports one node, so pick the first one + if (options.serverMode === 'serverless' && Array.isArray(nodes)) { + nodes = nodes[0] + } + let nodeOptions: Array = Array.isArray(nodes) ? nodes : [nodes] type ConnectionDefaults = Record nodeOptions = nodeOptions.map(opt => { @@ -368,20 +395,14 @@ export default class Client extends API { this.connectionPool.addConnection(nodeOptions) } - this.transport = new options.Transport({ + let transportOptions: TransportOptions = { diagnostic: this.diagnostic, connectionPool: this.connectionPool, serializer: this.serializer, maxRetries: options.maxRetries, requestTimeout: options.requestTimeout, - sniffInterval: options.sniffInterval, - sniffOnStart: options.sniffOnStart, - sniffOnConnectionFault: options.sniffOnConnectionFault, - sniffEndpoint: options.sniffEndpoint, compression: options.compression, headers: options.headers, - nodeFilter: options.nodeFilter, - nodeSelector: options.nodeSelector, generateRequestId: options.generateRequestId, name: options.name, opaqueIdPrefix: options.opaqueIdPrefix, @@ -389,13 +410,25 @@ export default class Client extends API { productCheck: 'Elasticsearch', maxResponseSize: options.maxResponseSize, maxCompressedResponseSize: options.maxCompressedResponseSize, - vendoredHeaders: { - jsonContentType: 'application/vnd.elasticsearch+json; compatible-with=9', - ndjsonContentType: 'application/vnd.elasticsearch+x-ndjson; compatible-with=9', - accept: 'application/vnd.elasticsearch+json; compatible-with=9,text/plain' - }, redaction: options.redaction - }) + } + if (options.serverMode !== 'serverless') { + transportOptions = Object.assign({}, transportOptions, { + sniffInterval: options.sniffInterval, + sniffOnStart: options.sniffOnStart, + sniffOnConnectionFault: options.sniffOnConnectionFault, + sniffEndpoint: options.sniffEndpoint, + nodeFilter: options.nodeFilter, + nodeSelector: options.nodeSelector, + vendoredHeaders: { + jsonContentType: 'application/vnd.elasticsearch+json; compatible-with=9', + ndjsonContentType: 'application/vnd.elasticsearch+x-ndjson; compatible-with=9', + accept: 'application/vnd.elasticsearch+json; compatible-with=9,text/plain' + } + }) + } + + this.transport = new options.Transport(transportOptions) this.helpers = new Helpers({ client: this, diff --git a/src/helpers.ts b/src/helpers.ts index 0043a8ab5..e8a64545a 100644 --- a/src/helpers.ts +++ b/src/helpers.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License") you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. 
and contributors + * SPDX-License-Identifier: Apache-2.0 */ /* eslint-disable @typescript-eslint/naming-convention */ @@ -25,7 +11,7 @@ import assert from 'node:assert' import * as timersPromises from 'node:timers/promises' import { Readable } from 'node:stream' import { errors, TransportResult, TransportRequestOptions, TransportRequestOptionsWithMeta } from '@elastic/transport' -import { Table, TypeMap, tableFromIPC, RecordBatchStreamReader } from 'apache-arrow/Arrow.node' +import { Table, TypeMap, tableFromIPC, AsyncRecordBatchStreamReader } from 'apache-arrow/Arrow.node' import Client from './client' import * as T from './api/types' import { Id } from './api/types' @@ -55,7 +41,7 @@ export interface MsearchHelperOptions extends T.MsearchRequest { export interface MsearchHelper extends Promise { stop: (error?: Error | null) => void - search: (header: T.MsearchMultisearchHeader, body: T.MsearchMultisearchBody) => Promise> + search: (header: T.MsearchMultisearchHeader, body: T.SearchSearchRequestBody) => Promise> } export interface MsearchHelperResponse { @@ -146,19 +132,10 @@ export interface EsqlColumn { type: string } -export type EsqlValue = any[] - -export type EsqlRow = EsqlValue[] - -export interface EsqlResponse { - columns: EsqlColumn[] - values: EsqlRow[] -} - export interface EsqlHelper { toRecords: () => Promise> toArrowTable: () => Promise> - toArrowReader: () => Promise + toArrowReader: () => Promise } export interface EsqlToRecords { @@ -376,7 +353,7 @@ export default class Helpers { // TODO: support abort a single search? // NOTE: the validation checks are synchronous and the callback/promise will // be resolved in the same tick. We might want to fix this in the future. - search (header: T.MsearchMultisearchHeader, body: T.MsearchMultisearchBody): Promise> { + search (header: T.MsearchMultisearchHeader, body: T.SearchSearchRequestBody): Promise> { if (stopReading) { const error = stopError === null ? new ConfigurationError('The msearch processor has been stopped') @@ -411,7 +388,7 @@ export default class Helpers { async function iterate (): Promise { const { semaphore, finish } = buildSemaphore() - const msearchBody: Array = [] + const msearchBody: Array = [] const callbacks: any[] = [] let loadedOperations = 0 timeoutRef = setTimeout(onFlushTimeout, flushInterval) // eslint-disable-line @@ -504,7 +481,7 @@ export default class Helpers { } } - function send (msearchBody: Array, callbacks: any[]): void { + function send (msearchBody: Array, callbacks: any[]): void { /* istanbul ignore if */ if (running > concurrency) { throw new Error('Max concurrency reached') @@ -522,7 +499,7 @@ export default class Helpers { } } - function msearchOperation (msearchBody: Array, callbacks: any[], done: () => void): void { + function msearchOperation (msearchBody: Array, callbacks: any[], done: () => void): void { let retryCount = retries // Instead of going full on async-await, which would make the code easier to read, @@ -530,7 +507,7 @@ export default class Helpers { // This because every time we use async await, V8 will create multiple promises // behind the scenes, making the code slightly slower. 
tryMsearch(msearchBody, callbacks, retrySearch) - function retrySearch (msearchBody: Array, callbacks: any[]): void { + function retrySearch (msearchBody: Array, callbacks: any[]): void { if (msearchBody.length > 0 && retryCount > 0) { retryCount -= 1 setTimeout(tryMsearch, wait, msearchBody, callbacks, retrySearch) @@ -542,7 +519,7 @@ export default class Helpers { // This function never returns an error, if the msearch operation fails, // the error is dispatched to all search executors. - function tryMsearch (msearchBody: Array, callbacks: any[], done: (msearchBody: Array, callbacks: any[]) => void): void { + function tryMsearch (msearchBody: Array, callbacks: any[], done: (msearchBody: Array, callbacks: any[]) => void): void { client.msearch(Object.assign({}, msearchOptions, { body: msearchBody }), reqOptions as TransportRequestOptionsWithMeta) .then(results => { const retryBody = [] @@ -977,7 +954,7 @@ export default class Helpers { esql (params: T.EsqlQueryRequest, reqOptions: TransportRequestOptions = {}): EsqlHelper { const client = this[kClient] - function toRecords (response: EsqlResponse): TDocument[] { + function toRecords (response: T.EsqlEsqlResult): TDocument[] { const { columns, values } = response return values.map(row => { const doc: Partial = {} @@ -1004,8 +981,7 @@ export default class Helpers { params.format = 'json' params.columnar = false - // @ts-expect-error it's typed as ArrayBuffer but we know it will be JSON - const response: EsqlResponse = await client.esql.query(params, reqOptions) + const response = await client.esql.query(params, reqOptions) const records: TDocument[] = toRecords(response) const { columns } = response return { records, columns } @@ -1019,11 +995,12 @@ export default class Helpers { params.format = 'arrow' - const response = await client.esql.query(params, reqOptions) + // @ts-expect-error the return type will be ArrayBuffer when the format is set to 'arrow' + const response: ArrayBuffer = await client.esql.query(params, reqOptions) return tableFromIPC(response) }, - async toArrowReader (): Promise { + async toArrowReader (): Promise { if (metaHeader !== null) { reqOptions.headers = reqOptions.headers ?? {} reqOptions.headers['x-elastic-client-meta'] = `${metaHeader as string},h=qa` @@ -1032,8 +1009,9 @@ export default class Helpers { params.format = 'arrow' - const response = await client.esql.query(params, reqOptions) - return RecordBatchStreamReader.from(response) + // @ts-expect-error response is a Readable when asStream is true + const response: Readable = await client.esql.query(params, reqOptions) + return await AsyncRecordBatchStreamReader.from(Readable.from(response)) } } diff --git a/src/sniffingTransport.ts b/src/sniffingTransport.ts index 7c9cec43c..389c54c3d 100644 --- a/src/sniffingTransport.ts +++ b/src/sniffingTransport.ts @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License") you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. 
See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 */ import assert from 'node:assert' diff --git a/test/esm/test-import.mjs b/test/esm/test-import.mjs index f7a6f09e6..693ac3e18 100644 --- a/test/esm/test-import.mjs +++ b/test/esm/test-import.mjs @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 */ import { Client } from '@elastic/elasticsearch' diff --git a/test/integration/helper.js b/test/integration/helper.js index fe4e0b422..bfe2535fa 100644 --- a/test/integration/helper.js +++ b/test/integration/helper.js @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 */ 'use strict' diff --git a/test/integration/helpers/bulk.test.js b/test/integration/helpers/bulk.test.js index a1b2be118..bffad53b1 100644 --- a/test/integration/helpers/bulk.test.js +++ b/test/integration/helpers/bulk.test.js @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. 
and contributors + * SPDX-License-Identifier: Apache-2.0 */ 'use strict' diff --git a/test/integration/helpers/msearch.test.js b/test/integration/helpers/msearch.test.js index fb317b0f7..479ddfec7 100644 --- a/test/integration/helpers/msearch.test.js +++ b/test/integration/helpers/msearch.test.js @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 */ 'use strict' diff --git a/test/integration/helpers/scroll.test.js b/test/integration/helpers/scroll.test.js index 36f3b8528..6d5148a9e 100644 --- a/test/integration/helpers/scroll.test.js +++ b/test/integration/helpers/scroll.test.js @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 */ 'use strict' diff --git a/test/integration/helpers/search.test.js b/test/integration/helpers/search.test.js index 7a6946a9f..2f0512177 100644 --- a/test/integration/helpers/search.test.js +++ b/test/integration/helpers/search.test.js @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. 
and contributors + * SPDX-License-Identifier: Apache-2.0 */ 'use strict' diff --git a/test/integration/index.js b/test/integration/index.js index b07ddd2d7..a4d51ea4e 100644 --- a/test/integration/index.js +++ b/test/integration/index.js @@ -1,20 +1,6 @@ /* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Copyright Elasticsearch B.V. and contributors + * SPDX-License-Identifier: Apache-2.0 */ 'use strict' @@ -24,436 +10,63 @@ process.on('unhandledRejection', function (err) { process.exit(1) }) -const { writeFileSync, readFileSync, readdirSync, statSync } = require('fs') -const { join, sep } = require('path') -const yaml = require('js-yaml') -const minimist = require('minimist') -const ms = require('ms') -const { Client } = require('../../index') -const build = require('./test-runner') -const { sleep } = require('./helper') -const createJunitReporter = require('./reporter') +const assert = require('node:assert') +const url = require('node:url') +const fs = require('node:fs') +const path = require('node:path') +const globby = require('globby') +const semver = require('semver') const downloadArtifacts = require('../../scripts/download-artifacts') -const yamlFolder = downloadArtifacts.locations.freeTestFolder -const xPackYamlFolder = downloadArtifacts.locations.xPackTestFolder +const buildTests = require('./test-builder') -const MAX_API_TIME = 1000 * 90 -const MAX_FILE_TIME = 1000 * 30 -const MAX_TEST_TIME = 1000 * 3 +const yamlFolder = downloadArtifacts.locations.testYamlFolder -const options = minimist(process.argv.slice(2), { - boolean: ['bail'], - string: ['suite', 'test'], -}) - -const freeSkips = { - // working on fixes for these - '/free/aggregations/bucket_selector.yml': ['bad script'], - '/free/aggregations/bucket_script.yml': ['bad script'], - - // either the YAML test definition is wrong, or this fails because JSON.stringify is coercing "1.0" to "1" - '/free/aggregations/percentiles_bucket.yml': ['*'], - - // not supported yet - '/free/cluster.desired_nodes/10_basic.yml': ['*'], - - // Cannot find methods on `Internal` object - '/free/cluster.desired_balance/10_basic.yml': ['*'], - '/free/cluster.desired_nodes/20_dry_run.yml': ['*'], - '/free/cluster.prevalidate_node_removal/10_basic.yml': ['*'], - - // the v8 client never sends the scroll_id in querystring, - // the way the test is structured causes a security exception - 'free/scroll/10_basic.yml': ['Body params override query string'], - 'free/scroll/11_clear.yml': [ - 'Body params with array param override query string', - 'Body params with string param scroll id override query string' - ], - 'free/cat.allocation/10_basic.yml': ['*'], - 'free/cat.snapshots/10_basic.yml': ['Test cat snapshots output'], - - 'indices.stats/50_disk_usage.yml': ['Disk usage stats'], - 
'indices.stats/60_field_usage.yml': ['Field usage stats'], - - // skipping because we are booting ES with `discovery.type=single-node` - // and this test will fail because of this configuration - 'nodes.stats/30_discovery.yml': ['*'], - - // the expected error is returning a 503, - // which triggers a retry and the node to be marked as dead - 'search.aggregation/240_max_buckets.yml': ['*'], - - // long values and json do not play nicely together - 'search.aggregation/40_range.yml': ['Min and max long range bounds'], - - // the yaml runner assumes that null means "does not exists", - // while null is a valid json value, so the check will fail - 'search/320_disallow_queries.yml': ['Test disallow expensive queries'], - 'free/tsdb/90_unsupported_operations.yml': ['noop update'], -} - -const platinumDenyList = { - 'api_key/10_basic.yml': ['Test get api key'], - 'api_key/20_query.yml': ['*'], - 'api_key/11_invalidation.yml': ['Test invalidate api key by realm name'], - 'analytics/histogram.yml': ['Histogram requires values in increasing order'], - - // object keys must me strings, and `0.0.toString()` is `0` - 'ml/evaluate_data_frame.yml': [ - 'Test binary_soft_classifition precision', - 'Test binary_soft_classifition recall', - 'Test binary_soft_classifition confusion_matrix' - ], - - // The cleanup fails with a index not found when retrieving the jobs - 'ml/get_datafeed_stats.yml': ['Test get datafeed stats when total_search_time_ms mapping is missing'], - 'ml/bucket_correlation_agg.yml': ['Test correlation bucket agg simple'], - - // start should be a string - 'ml/jobs_get_result_overall_buckets.yml': ['Test overall buckets given epoch start and end params'], - - // this can't happen with the client - 'ml/start_data_frame_analytics.yml': ['Test start with inconsistent body/param ids'], - 'ml/stop_data_frame_analytics.yml': ['Test stop with inconsistent body/param ids'], - 'ml/preview_datafeed.yml': ['*'], - - // Investigate why is failing - 'ml/inference_crud.yml': ['*'], - 'ml/categorization_agg.yml': ['Test categorization aggregation with poor settings'], - 'ml/filter_crud.yml': ['*'], - - // investigate why this is failing - 'monitoring/bulk/10_basic.yml': ['*'], - 'monitoring/bulk/20_privileges.yml': ['*'], - 'license/20_put_license.yml': ['*'], - 'snapshot/10_basic.yml': ['*'], - 'snapshot/20_operator_privileges_disabled.yml': ['*'], - - // the body is correct, but the regex is failing - 'sql/sql.yml': ['Getting textual representation'], - 'searchable_snapshots/10_usage.yml': ['*'], - 'service_accounts/10_basic.yml': ['*'], - - // we are setting two certificates in the docker config - 'ssl/10_basic.yml': ['*'], - 'token/10_basic.yml': ['*'], - 'token/11_invalidation.yml': ['*'], - - // very likely, the index template has not been loaded yet. - // we should run a indices.existsTemplate, but the name of the - // template may vary during time. 
- 'transforms_crud.yml': [ - 'Test basic transform crud', - 'Test transform with query and array of indices in source', - 'Test PUT continuous transform', - 'Test PUT continuous transform without delay set' - ], - 'transforms_force_delete.yml': [ - 'Test force deleting a running transform' - ], - 'transforms_cat_apis.yml': ['*'], - 'transforms_start_stop.yml': ['*'], - 'transforms_stats.yml': ['*'], - 'transforms_stats_continuous.yml': ['*'], - 'transforms_update.yml': ['*'], - - // js does not support ulongs - 'unsigned_long/10_basic.yml': ['*'], - 'unsigned_long/20_null_value.yml': ['*'], - 'unsigned_long/30_multi_fields.yml': ['*'], - 'unsigned_long/40_different_numeric.yml': ['*'], - 'unsigned_long/50_script_values.yml': ['*'], - - // the v8 client flattens the body into the parent object - 'platinum/users/10_basic.yml': ['Test put user with different username in body'], - - // docker issue? - 'watcher/execute_watch/60_http_input.yml': ['*'], - - // the checks are correct, but for some reason the test is failing on js side - // I bet is because the backslashes in the rg - 'watcher/execute_watch/70_invalid.yml': ['*'], - 'watcher/put_watch/10_basic.yml': ['*'], - 'xpack/15_basic.yml': ['*'], - - // test that are failing that needs to be investigated - // the error cause can either be in the yaml test or in the specification - - // start should be a string in the yaml test - 'platinum/ml/delete_job_force.yml': ['Test force delete an open job that is referred by a started datafeed'], - 'platinum/ml/evaluate_data_frame.yml': ['*'], - 'platinum/ml/get_datafeed_stats.yml': ['*'], - - // start should be a string in the yaml test - 'platinum/ml/start_stop_datafeed.yml': ['*'], -} - -function runner (opts = {}) { - const options = { node: opts.node } - if (opts.isXPack) { - options.tls = { - ca: readFileSync(join(__dirname, '..', '..', '.buildkite', 'certs', 'ca.crt'), 'utf8'), - rejectUnauthorized: false +const getAllFiles = async dir => { + const files = await globby(dir, { + expandDirectories: { + extensions: ['yml', 'yaml'] } - } - const client = new Client(options) - log('Loading yaml suite') - start({ client, isXPack: opts.isXPack }) - .catch(err => { - if (err.name === 'ResponseError') { - console.error(err) - console.log(JSON.stringify(err.meta, null, 2)) - } else { - console.error(err) - } - process.exit(1) - }) + }) + return files.sort() } -async function waitCluster (client, times = 0) { - try { - await client.cluster.health({ wait_for_status: 'green', timeout: '50s' }) - } catch (err) { - if (++times < 10) { - await sleep(5000) - return waitCluster(client, times) - } - console.error(err) - process.exit(1) - } -} - -async function start ({ client, isXPack }) { - log('Waiting for Elasticsearch') - await waitCluster(client) - - const body = await client.info() - const { number: version, build_hash: hash } = body.version - - log(`Downloading artifacts for hash ${hash}...`) - await downloadArtifacts({ hash, version }) - - log(`Testing ${isXPack ? 'Platinum' : 'Free'} api...`) - const junit = createJunitReporter() - const junitTestSuites = junit.testsuites(`Integration test for ${isXPack ? 'Platinum' : 'Free'} api`) - - const stats = { - total: 0, - skip: 0, - pass: 0, - assertions: 0 - } - const folders = getAllFiles(isXPack ? 
xPackYamlFolder : yamlFolder) - .filter(t => !/(README|TODO)/g.test(t)) - // we cluster the array based on the folder names, - // to provide a better test log output - .reduce((arr, file) => { - const path = file.slice(file.indexOf('/rest-api-spec/test'), file.lastIndexOf('/')) - let inserted = false - for (let i = 0; i < arr.length; i++) { - if (arr[i][0].includes(path)) { - inserted = true - arr[i].push(file) - break - } - } - if (!inserted) arr.push([file]) - return arr - }, []) - - const totalTime = now() - for (const folder of folders) { - // pretty name - const apiName = folder[0].slice( - folder[0].indexOf(`${sep}rest-api-spec${sep}test`) + 19, - folder[0].lastIndexOf(sep) - ) - - log('Testing ' + apiName.slice(1)) - const apiTime = now() - - for (const file of folder) { - const testRunner = build({ - client, - version, - isXPack: file.includes('platinum') - }) - const fileTime = now() - const data = readFileSync(file, 'utf8') - // get the test yaml (as object), some file has multiple yaml documents inside, - // every document is separated by '---', so we split on the separator - // and then we remove the empty strings, finally we parse them - const tests = data - .split('\n---\n') - .map(s => s.trim()) - // empty strings - .filter(Boolean) - .map(parse) - // null values - .filter(Boolean) - - // get setup and teardown if present - let setupTest = null - let teardownTest = null - for (const test of tests) { - if (test.setup) setupTest = test.setup - if (test.teardown) teardownTest = test.teardown - } - - const cleanPath = file.slice(file.lastIndexOf(apiName)) - - // skip if --suite CLI arg doesn't match - if (options.suite && !cleanPath.endsWith(options.suite)) continue - - log(' ' + cleanPath) - const junitTestSuite = junitTestSuites.testsuite(apiName.slice(1) + ' - ' + cleanPath) - - for (const test of tests) { - const testTime = now() - const name = Object.keys(test)[0] - - // skip setups, teardowns and anything that doesn't match --test flag when present - if (name === 'setup' || name === 'teardown') continue - if (options.test && !name.endsWith(options.test)) continue - - const junitTestCase = junitTestSuite.testcase(name, `node_${process.version}: ${cleanPath}`) - - stats.total += 1 - if (shouldSkip(isXPack, file, name)) { - stats.skip += 1 - junitTestCase.skip('This test is in the skip list of the client') - junitTestCase.end() - continue - } - log(' - ' + name) - try { - await testRunner.run(setupTest, test[name], teardownTest, stats, junitTestCase) - stats.pass += 1 - } catch (err) { - junitTestCase.failure(err) - junitTestCase.end() - junitTestSuite.end() - junitTestSuites.end() - generateJunitXmlReport(junit, isXPack ? 'platinum' : 'free') - err.meta = JSON.stringify(err.meta ?? {}, null, 2) - console.error(err) - - if (options.bail) { - process.exit(1) - } else { - continue - } - } - const totalTestTime = now() - testTime - junitTestCase.end() - if (totalTestTime > MAX_TEST_TIME) { - log(' took too long: ' + ms(totalTestTime)) - } else { - log(' took: ' + ms(totalTestTime)) - } - } - junitTestSuite.end() - const totalFileTime = now() - fileTime - if (totalFileTime > MAX_FILE_TIME) { - log(` ${cleanPath} took too long: ` + ms(totalFileTime)) - } else { - log(` ${cleanPath} took: ` + ms(totalFileTime)) - } - } - const totalApiTime = now() - apiTime - if (totalApiTime > MAX_API_TIME) { - log(`${apiName} took too long: ` + ms(totalApiTime)) - } else { - log(`${apiName} took: ` + ms(totalApiTime)) - } - } - junitTestSuites.end() - generateJunitXmlReport(junit, isXPack ? 
'platinum' : 'free') - log(`Total testing time: ${ms(now() - totalTime)}`) - log(`Test stats: - - Total: ${stats.total} - - Skip: ${stats.skip} - - Pass: ${stats.pass} - - Fail: ${stats.total - (stats.pass + stats.skip)} - - Assertions: ${stats.assertions} - `) -} - -function log (text) { - process.stdout.write(text + '\n') -} - -function now () { - const ts = process.hrtime() - return (ts[0] * 1e3) + (ts[1] / 1e6) -} - -function parse (data) { - let doc - try { - doc = yaml.load(data, { schema: yaml.CORE_SCHEMA }) - } catch (err) { - console.error(err) - return - } - return doc -} - -function generateJunitXmlReport (junit, suite) { - writeFileSync( - join(__dirname, '..', '..', `${suite}-report-junit.xml`), - junit.prettyPrint() - ) +async function doTestBuilder (version, clientOptions) { + await downloadArtifacts(undefined, version) + const files = await getAllFiles(yamlFolder) + await buildTests(files, clientOptions) } if (require.main === module) { - const scheme = process.env.TEST_SUITE === 'platinum' ? 'https' : 'http' - const node = process.env.TEST_ES_SERVER || `${scheme}://elastic:changeme@localhost:9200` - const opts = { - node, - isXPack: process.env.TEST_SUITE !== 'free' - } - runner(opts) -} - -const shouldSkip = (isXPack, file, name) => { - if (options.suite || options.test) return false - - let list = Object.keys(freeSkips) - for (let i = 0; i < list.length; i++) { - const freeTest = freeSkips[list[i]] - for (let j = 0; j < freeTest.length; j++) { - if (file.endsWith(list[i]) && (name === freeTest[j] || freeTest[j] === '*')) { - const testName = file.slice(file.indexOf(`${sep}elasticsearch${sep}`)) + ' / ' + name - log(`Skipping test ${testName} because it is denylisted in the free test suite`) - return true - } - } + const node = process.env.TEST_ES_SERVER + const apiKey = process.env.ES_API_SECRET_KEY + const password = process.env.ELASTIC_PASSWORD + let version = process.env.STACK_VERSION + + assert(node != null, 'Environment variable missing: TEST_ES_SERVER') + assert(apiKey != null || password != null, 'Environment variable missing: ES_API_SECRET_KEY or ELASTIC_PASSWORD') + assert(version != null, 'Environment variable missing: STACK_VERSION') + + version = semver.clean(version.includes('SNAPSHOT') ? version.split('-')[0] : version) + + const clientOptions = { node } + if (apiKey != null) { + clientOptions.auth = { apiKey } + } else { + clientOptions.auth = { username: 'elastic', password } } - - if (file.includes('x-pack') || isXPack) { - list = Object.keys(platinumDenyList) - for (let i = 0; i < list.length; i++) { - const platTest = platinumDenyList[list[i]] - for (let j = 0; j < platTest.length; j++) { - if (file.endsWith(list[i]) && (name === platTest[j] || platTest[j] === '*')) { - const testName = file.slice(file.indexOf(`${sep}elasticsearch${sep}`)) + ' / ' + name - log(`Skipping test ${testName} because it is denylisted in the platinum test suite`) - return true - } - } + const nodeUrl = new url.URL(node) + if (nodeUrl.protocol === 'https:') { + clientOptions.tls = { + ca: fs.readFileSync(path.join(__dirname, '..', '..', '.buildkite', 'certs', 'ca.crt'), 'utf8'), + rejectUnauthorized: false } } - return false + doTestBuilder(version, clientOptions) + .then(() => process.exit(0)) + .catch(err => { + console.error(err) + process.exit(1) + }) } - -const getAllFiles = dir => - readdirSync(dir).reduce((files, file) => { - const name = join(dir, file) - const isDirectory = statSync(name).isDirectory() - return isDirectory ? 
[...files, ...getAllFiles(name)] : [...files, name] - }, []) - -module.exports = runner diff --git a/test/integration/reporter.js b/test/integration/reporter.js deleted file mode 100644 index d94e09ba3..000000000 --- a/test/integration/reporter.js +++ /dev/null @@ -1,110 +0,0 @@ -'use strict' - -const assert = require('node:assert') -const { create } = require('xmlbuilder2') - -function createJunitReporter () { - const report = {} - - return { testsuites, prettyPrint } - - function prettyPrint () { - return create(report).end({ prettyPrint: true }) - } - - function testsuites (name) { - assert(name, 'The testsuites name is required') - assert(report.testsuites === undefined, 'Cannot set more than one testsuites block') - const startTime = Date.now() - - report.testsuites = { - '@id': new Date().toISOString(), - '@name': name - } - - const testsuiteList = [] - - return { - testsuite: createTestSuite(testsuiteList), - end () { - report.testsuites['@time'] = Math.round((Date.now() - startTime) / 1000) - report.testsuites['@tests'] = testsuiteList.reduce((acc, val) => { - acc += val['@tests'] - return acc - }, 0) - report.testsuites['@failures'] = testsuiteList.reduce((acc, val) => { - acc += val['@failures'] - return acc - }, 0) - report.testsuites['@skipped'] = testsuiteList.reduce((acc, val) => { - acc += val['@skipped'] - return acc - }, 0) - if (testsuiteList.length) { - report.testsuites.testsuite = testsuiteList - } - } - } - } - - function createTestSuite (testsuiteList) { - return function testsuite (name) { - assert(name, 'The testsuite name is required') - const startTime = Date.now() - const suite = { - '@id': new Date().toISOString(), - '@name': name - } - const testcaseList = [] - testsuiteList.push(suite) - return { - testcase: createTestCase(testcaseList), - end () { - suite['@time'] = Math.round((Date.now() - startTime) / 1000) - suite['@tests'] = testcaseList.length - suite['@failures'] = testcaseList.filter(t => t.failure).length - suite['@skipped'] = testcaseList.filter(t => t.skipped).length - if (testcaseList.length) { - suite.testcase = testcaseList - } - } - } - } - } - - function createTestCase (testcaseList) { - return function testcase (name, file) { - assert(name, 'The testcase name is required') - const startTime = Date.now() - const tcase = { - '@id': new Date().toISOString(), - '@name': name - } - if (file) tcase['@file'] = file - testcaseList.push(tcase) - return { - failure (error) { - assert(error, 'The failure error object is required') - tcase.failure = { - '#': error.stack, - '@message': error.message, - '@type': error.code - } - }, - skip (reason) { - if (typeof reason !== 'string') { - reason = JSON.stringify(reason, null, 2) - } - tcase.skipped = { - '#': reason - } - }, - end () { - tcase['@time'] = Math.round((Date.now() - startTime) / 1000) - } - } - } - } -} - -module.exports = createJunitReporter diff --git a/test/integration/test-builder.js b/test/integration/test-builder.js new file mode 100644 index 000000000..64ce97dd2 --- /dev/null +++ b/test/integration/test-builder.js @@ -0,0 +1,482 @@ +/* + * Copyright Elasticsearch B.V. 
and contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +'use strict' + +const { join, sep } = require('node:path') +const { readFileSync, writeFileSync, promises } = require('node:fs') +const yaml = require('js-yaml') +const { rimraf } = require('rimraf') +const { mkdir } = promises + +const generatedTestsPath = join(__dirname, '..', '..', 'generated-tests') + +const stackSkips = [ + // test definition bug: response is empty string + 'cat/fielddata.yml', + // test definition bug: response is empty string + 'cluster/delete_voting_config_exclusions.yml', + // test definition bug: response is empty string + 'cluster/voting_config_exclusions.yml', + // client bug: ILM request takes a "body" param, but "body" is a special keyword in the JS client + 'ilm/10_basic.yml', + // health report is... not healthy + 'health_report.yml', + // TODO: `contains` action only supports checking for primitives inside arrays or strings inside strings, not referenced values like objects inside arrays + 'entsearch/10_basic.yml', + // test definition bug: error message does not match + 'entsearch/30_sync_jobs_stack.yml', + // no handler found for uri [/knn_test/_knn_search] + 'knn_search.yml', + // TODO: fix license on ES startup - "Operation failed: Current license is basic." + 'license/10_stack.yml', + // response.body should be truthy. found: "" + 'logstash/10_basic.yml', + // test definition bug? security_exception: unable to authenticate user [x_pack_rest_user] for REST request [/_ml/trained_models/test_model/definition/0] + 'machine_learning/clear_tm_deployment_cache.yml', + // client bug: 0.99995 does not equal 0.5 + 'machine_learning/data_frame_evaluate.yml', + // test definition bug? regex has whitespace, maybe needs to be removed + 'machine_learning/explain_data_frame_analytics.yml', + // client bug: 4 != 227 + 'machine_learning/preview_datafeed.yml', + // test definition bug: error message does not match + 'machine_learning/revert_model_snapshot.yml', + // test definition bug: error message does not match + 'machine_learning/update_model_snapshot.yml', + // version_conflict_engine_exception + 'machine_learning/jobs_crud.yml', + // test definition bug: error message does not match + 'machine_learning/model_snapshots.yml', + // test definition bug: error message does not match + 'query_rules/30_test.yml', + // client bug: 0 != 0.1 + 'script/10_basic.yml', + // client bug: request takes a "body" param, but "body" is a special keyword in the JS client + 'searchable_snapshots/10_basic.yml', + // test builder bug: does `match` action need to support "array contains value"? 
+ 'security/10_api_key_basic.yml', + // test definition bug: error message does not match + 'security/140_user.yml', + // test definition bug: error message does not match + 'security/30_privileges_stack.yml', + // unknown issue: $profile.enabled path doesn't exist in response + 'security/130_user_profile.yml', + // test definition bug: error message does not match + 'security/change_password.yml', + // test builder bug: media_type_header_exception + 'simulate/ingest.yml', + // client bug: request takes a "body" param, but "body" is a special keyword in the JS client + 'snapshot/10_basic.yml', + // test definition bug: illegal_argument_exception + 'sql/10_basic.yml', + // test definition bug: illegal_argument_exception + 'text_structure/10_basic.yml', + // test definition bug: illegal_argument_exception + 'transform/10_basic.yml', +] + +const serverlessSkips = [ + // TODO: sql.getAsync does not set a content-type header but ES expects one + // transport only sets a content-type if the body is not empty + 'sql/10_basic.yml', + // TODO: bulk call in setup fails due to "malformed action/metadata line" + // bulk body is being sent as a Buffer, unsure if related. + 'transform/10_basic.yml', + // TODO: scripts_painless_execute expects {"result":"0.1"}, gets {"result":"0"} + // body sent as Buffer, unsure if related + 'script/10_basic.yml', + // TODO: expects {"outlier_detection.auc_roc.value":0.99995}, gets {"outlier_detection.auc_roc.value":0.5} + // remove if/when https://github.com/elastic/elasticsearch-clients-tests/issues/37 is resolved + 'machine_learning/data_frame_evaluate.yml', + // TODO: Cannot perform requested action because job [job-crud-test-apis] is not open + 'machine_learning/jobs_crud.yml', + // TODO: test runner needs to support ignoring 410 errors + 'enrich/10_basic.yml', + // TODO: parameter `enabled` is not allowed in source + // Same underlying problem as https://github.com/elastic/elasticsearch-clients-tests/issues/55 + 'cluster/component_templates.yml', + // TODO: expecting `ct_field` field mapping to be returned, but instead only finds `field` + 'indices/simulate_template.yml', + 'indices/simulate_index_template.yml', + // TODO: test currently times out + 'inference/10_basic.yml', + // TODO: Fix: "Trained model deployment [test_model] is not allocated to any nodes" + 'machine_learning/20_trained_model_serverless.yml', + // TODO: query_rules api not available yet + 'query_rules/10_query_rules.yml', + 'query_rules/20_rulesets.yml', + 'query_rules/30_test.yml', + // TODO: security.putRole API not available + 'security/50_roles_serverless.yml', + // TODO: expected undefined to equal 'some_table' + 'entsearch/50_connector_updates.yml', + // TODO: resource_not_found_exception + 'tasks_serverless.yml', +] + +function parse (data) { + let doc + try { + doc = yaml.load(data, { schema: yaml.CORE_SCHEMA }) + } catch (err) { + console.error(err) + return + } + return doc +} + +async function build (yamlFiles, clientOptions) { + await rimraf(generatedTestsPath) + await mkdir(generatedTestsPath, { recursive: true }) + + for (const file of yamlFiles) { + const apiName = file.split(`${sep}tests${sep}`)[1] + const data = readFileSync(file, 'utf8') + + const tests = data + .split('\n---\n') + .map(s => s.trim()) + // empty strings + .filter(Boolean) + .map(parse) + // null values + .filter(Boolean) + + let code = "import { test } from 'tap'\n" + code += "import { Client } from '@elastic/elasticsearch'\n\n" + + const requires = tests.find(test => test.requires != null) + let skip = new 
Set() + if (requires != null) { + const { serverless = true, stack = true } = requires.requires + if (!serverless) skip.add('process.env.TEST_ES_SERVERLESS === "1"') + if (!stack) skip.add('process.env.TEST_ES_STACK === "1"') + } + + if (stackSkips.includes(apiName)) skip.add('process.env.TEST_ES_STACK === "1"') + if (serverlessSkips.includes(apiName)) skip.add('process.env.TEST_ES_SERVERLESS === "1"') + + if (skip.size > 0) { + code += `test('${apiName}', { skip: ${Array.from(skip).join(' || ')} }, t => {\n` + } else { + code += `test('${apiName}', t => {\n` + } + + for (const test of tests) { + if (test.setup != null) { + code += ' t.before(async () => {\n' + code += indent(buildActions(test.setup), 4) + code += ' })\n\n' + } + + if (test.teardown != null) { + code += ' t.after(async () => {\n' + code += indent(buildActions(test.teardown), 4) + code += ' })\n\n' + } + + for (const key of Object.keys(test).filter(k => !['setup', 'teardown', 'requires'].includes(k))) { + if (test[key].find(action => Object.keys(action)[0] === 'skip') != null) { + code += ` t.test('${key}', { skip: true }, async t => {\n` + } else { + code += ` t.test('${key}', async t => {\n` + } + code += indent(buildActions(test[key]), 4) + code += '\n t.end()\n' + code += ' })\n' + } + // if (test.requires != null) requires = test.requires + } + + code += '\n t.end()\n' + code += '})\n' + + const testDir = join(generatedTestsPath, apiName.split(sep).slice(0, -1).join(sep)) + const testFile = join(testDir, apiName.split(sep).pop().replace(/\.ya?ml$/, '.mjs')) + await mkdir(testDir, { recursive: true }) + writeFileSync(testFile, code, 'utf8') + } + + function buildActions (actions) { + let code = `const client = new Client(${JSON.stringify(clientOptions, null, 2)})\n` + code += 'let response\n\n' + + const vars = new Set() + + for (const action of actions) { + const key = Object.keys(action)[0] + switch (key) { + case 'do': + code += buildDo(action.do) + break + case 'set': + const setResult = buildSet(action.set, vars) + vars.add(setResult.varName) + code += setResult.code + break + case 'transform_and_set': + code += buildTransformAndSet(action.transform_and_set) + break + case 'match': + code += buildMatch(action.match) + break + case 'lt': + code += buildLt(action.lt) + break + case 'lte': + code += buildLte(action.lte) + break + case 'gt': + code += buildGt(action.gt) + break + case 'gte': + code += buildGte(action.gte) + break + case 'length': + code += buildLength(action.length) + break + case 'is_true': + code += buildIsTrue(action.is_true) + break + case 'is_false': + code += buildIsFalse(action.is_false) + break + case 'contains': + code += buildContains(action.contains) + break + case 'exists': + code += buildExists(action.exists) + break + case 'skip': + break + default: + console.warn(`Action not supported: ${key}`) + break + } + } + return code + } +} + +function buildDo (action) { + let code = '' + const keys = Object.keys(action) + if (keys.includes('catch')) { + code += 'try {\n' + code += indent(buildRequest(action), 2) + code += '} catch (err) {\n' + code += ` t.match(err.toString(), ${buildValLiteral(action.catch)})\n` + code += '}\n' + } else { + code += buildRequest(action) + } + return code +} + +function buildRequest(action) { + let code = '' + + const options = { meta: true } + + for (const key of Object.keys(action)) { + if (key === 'catch') continue + + if (key === 'headers') { + options.headers = action.headers + continue + } + + const params = action[key] + if (params.ignore != null) { + if 
(Array.isArray(params.ignore)) { + options.ignore = params.ignore + } else { + options.ignore = [params.ignore] + } + } + + code += `response = await client.${toCamelCase(key)}(${buildApiParams(action[key])}, ${JSON.stringify(options)})\n` + } + return code +} + +function buildSet (action, vars) { + const key = Object.keys(action)[0] + const varName = action[key] + const lookup = buildLookup(key) + + let code = '' + if (vars.has(varName)) { + code = `${varName} = ${lookup}\n` + } else { + code =`let ${varName} = ${lookup}\n` + } + return { code, varName } +} + +function buildTransformAndSet (action) { + return `// TODO buildTransformAndSet: ${JSON.stringify(action)}\n` +} + +function buildMatch (action) { + const key = Object.keys(action)[0] + let lookup = buildLookup(key) + const val = buildValLiteral(action[key]) + return `t.match(${lookup}, ${val})\n` +} + +function buildLt (action) { + const key = Object.keys(action)[0] + const lookup = buildLookup(key) + const val = buildValLiteral(action[key]) + return `t.ok(${lookup} < ${val})\n` +} + +function buildLte (action) { + const key = Object.keys(action)[0] + const lookup = buildLookup(key) + const val = buildValLiteral(action[key]) + return `t.ok(${lookup} <= ${val})\n` +} + +function buildGt (action) { + const key = Object.keys(action)[0] + const lookup = buildLookup(key) + const val = buildValLiteral(action[key]) + return `t.ok(${lookup} > ${val})\n` +} + +function buildGte (action) { + const key = Object.keys(action)[0] + const lookup = buildLookup(key) + const val = buildValLiteral(action[key]) + return `t.ok(${lookup} >= ${val})\n` +} + +function buildLength (action) { + const key = Object.keys(action)[0] + const lookup = buildLookup(key) + const val = buildValLiteral(action[key]) + + let code = '' + code += `if (typeof ${lookup} === 'object' && !Array.isArray(${lookup})) {\n` + code += ` t.equal(Object.keys(${lookup}).length, ${val})\n` + code += `} else {\n` + code += ` t.equal(${lookup}.length, ${val})\n` + code += `}\n` + return code +} + +function buildIsTrue (action) { + let lookup = `${buildLookup(action)}` + let errMessage = `\`${action} should be truthy. found: '\$\{JSON.stringify(${lookup})\}'\`` + if (lookup.includes('JSON.stringify')) errMessage = `\`${action} should be truthy. found: '\$\{${lookup}\}'\`` + return `t.ok(${lookup} === "true" || (Boolean(${lookup}) && ${lookup} !== "false"), ${errMessage})\n` +} + +function buildIsFalse (action) { + let lookup = `${buildLookup(action)}` + let errMessage = `\`${action} should be falsy. found: '\$\{JSON.stringify(${lookup})\}'\`` + if (lookup.includes('JSON.stringify')) errMessage = `\`${action} should be falsy. 
found: '\$\{${lookup}\}'\`` + return `t.ok(${lookup} === "false" || !Boolean(${lookup}), ${errMessage})\n` +} + +function buildContains (action) { + const key = Object.keys(action)[0] + const lookup = buildLookup(key) + const val = buildValLiteral(action[key]) + return `t.ok(${lookup}.includes(${val}), '${JSON.stringify(val)} not found in ${key}')\n` +} + +function buildExists (keyName) { + const lookup = buildLookup(keyName) + return `t.ok(${lookup} != null, \`Key "${keyName}" not found in response body: \$\{JSON.stringify(response.body, null, 2)\}\`)\n` +} + +function buildApiParams (params) { + if (Object.keys(params).length === 0) { + return 'undefined' + } else { + const out = {} + Object.keys(params).filter(k => k !== 'ignore' && k !== 'headers').forEach(k => out[k] = params[k]) + return buildValLiteral(out) + } +} + +function toCamelCase (name) { + return name.replace(/_([a-z])/g, g => g[1].toUpperCase()) +} + +function indent (str, spaces) { + const tabs = ' '.repeat(spaces) + return str.replace(/\s+$/, '').split('\n').map(l => `${tabs}${l}`).join('\n') + '\n' +} + +function buildLookup (path) { + if (path === '$body') return '(typeof response.body === "string" ? response.body : JSON.stringify(response.body))' + + const outPath = path.split('.').map(step => { + if (parseInt(step, 10).toString() === step) { + return `[${step}]` + } else if (step.match(/^\$[a-zA-Z0-9_]+$/)) { + const lookup = step.replace(/^\$/, '') + if (lookup === 'body') return '' + return `[${lookup}]` + } else if (step === '') { + return '' + } else { + return `['${step}']` + } + }).join('') + return `response.body${outPath}` +} + +function buildValLiteral (val) { + if (typeof val === 'string') val = val.trim() + if (isRegExp(val)) { + return JSON.stringify(val).replace(/^"/, '').replace(/"$/, '').replaceAll('\\\\', '\\') + } else if (isVariable(val)) { + if (val === '$body') return 'JSON.stringify(response.body)' + return val.replace(/^\$/, '') + } else if (isPlainObject(val)) { + return JSON.stringify(cleanObject(val), null, 2).replace(/"\$([a-zA-Z0-9_]+)"/g, '$1') + } else { + return JSON.stringify(val) + } +} + +function isRegExp (str) { + return typeof str === 'string' && str.startsWith('/') && str.endsWith('/') +} + +function isVariable (str) { + return typeof str === 'string' && str.match(/^\$[a-zA-Z0-9_]+$/) != null +} + +function cleanObject (obj) { + Object.keys(obj).forEach(key => { + let val = obj[key] + if (typeof val === 'string' && val.trim().startsWith('{') && val.trim().endsWith('}')) { + // attempt to parse as object + try { + val = JSON.parse(val) + } catch { + } + } else if (isPlainObject(val)) { + val = cleanObject(val) + } else if (Array.isArray(val)) { + val = val.map(item => isPlainObject(item) ? cleanObject(item) : item) + } + obj[key] = val + }) + return obj +} + +function isPlainObject(obj) { + return typeof obj === 'object' && !Array.isArray(obj) && obj != null +} + +module.exports = build diff --git a/test/integration/test-runner.js b/test/integration/test-runner.js deleted file mode 100644 index ce80da43e..000000000 --- a/test/integration/test-runner.js +++ /dev/null @@ -1,1086 +0,0 @@ -/* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -'use strict' - -/* eslint camelcase: 0 */ - -const chai = require('chai') -const semver = require('semver') -const helper = require('./helper') -const { join } = require('path') -const { locations } = require('../../scripts/download-artifacts') -const packageJson = require('../../package.json') - -chai.config.showDiff = true -chai.config.truncateThreshold = 0 -const { assert } = chai - -const { delve, to, isXPackTemplate, sleep, updateParams } = helper - -const supportedFeatures = [ - 'gtelte', - 'regex', - 'benchmark', - 'stash_in_path', - 'groovy_scripting', - 'headers', - 'transform_and_set', - 'catch_unauthorized', - 'arbitrary_key' -] - -function build (opts = {}) { - const client = opts.client - const esVersion = opts.version - const isXPack = opts.isXPack - const stash = new Map() - let response = null - - /** - * Runs a cleanup, removes all indices, aliases, templates, and snapshots - * @returns {Promise} - */ - async function cleanup (isXPack) { - response = null - stash.clear() - - await client.cluster.health({ - wait_for_no_initializing_shards: true, - timeout: '70s', - level: 'shards' - }) - - if (isXPack) { - // wipe rollup jobs - const jobsList = await client.rollup.getJobs({ id: '_all' }) - const jobsIds = jobsList.jobs.map(j => j.config.id) - await helper.runInParallel( - client, 'rollup.stopJob', - jobsIds.map(j => ({ id: j, wait_for_completion: true })) - ) - await helper.runInParallel( - client, 'rollup.deleteJob', - jobsIds.map(j => ({ id: j })) - ) - - // delete slm policies - const policies = await client.slm.getLifecycle() - await helper.runInParallel( - client, 'slm.deleteLifecycle', - Object.keys(policies).map(p => ({ policy_id: p })) - ) - - // remove 'x_pack_rest_user', used in some xpack test - try { - await client.security.deleteUser({ username: 'x_pack_rest_user' }, { ignore: [404] }) - } catch { - // do nothing - } - - const searchableSnapshotIndices = await client.cluster.state({ - metric: 'metadata', - filter_path: 'metadata.indices.*.settings.index.store.snapshot' - }) - if (searchableSnapshotIndices.metadata != null && searchableSnapshotIndices.metadata.indices != null) { - await helper.runInParallel( - client, 'indices.delete', - Object.keys(searchableSnapshotIndices.metadata.indices).map(i => ({ index: i })), - { ignore: [404] } - ) - } - } - - // clean snapshots - const repositories = await client.snapshot.getRepository() - for (const repository of Object.keys(repositories)) { - await client.snapshot.delete({ repository, snapshot: '*' }, { ignore: [404] }) - await client.snapshot.deleteRepository({ name: repository }, { ignore: [404] }) - } - - if (isXPack) { - // clean data streams - await client.indices.deleteDataStream({ name: '*', expand_wildcards: 'all' }) - } - - // clean all indices - await client.indices.delete({ - index: [ - '*', - '-.ds-ilm-history-*' - ], - expand_wildcards: 'open,closed,hidden' - }, { - ignore: [404] - }) - - // delete templates - const templates = await client.cat.templates({ h: 'name' }) - for (const template of templates.split('\n').filter(Boolean)) { - if (isXPackTemplate(template)) continue 
- const body = await client.indices.deleteTemplate({ name: template }, { ignore: [404] }) - if (JSON.stringify(body).includes(`index_template [${template}] missing`)) { - await client.indices.deleteIndexTemplate({ name: template }, { ignore: [404] }) - } - } - - // delete component template - const body = await client.cluster.getComponentTemplate() - const components = body.component_templates.filter(c => !isXPackTemplate(c.name)).map(c => c.name) - if (components.length > 0) { - try { - await client.cluster.deleteComponentTemplate({ name: components.join(',') }, { ignore: [404] }) - } catch { - // do nothing - } - } - - // Remove any cluster setting - const settings = await client.cluster.getSettings() - const newSettings = {} - for (const setting in settings) { - if (Object.keys(settings[setting]).length === 0) continue - newSettings[setting] = {} - for (const key in settings[setting]) { - newSettings[setting][`${key}.*`] = null - } - } - if (Object.keys(newSettings).length > 0) { - await client.cluster.putSettings(newSettings) - } - - if (isXPack) { - // delete ilm policies - const preserveIlmPolicies = [ - "ilm-history-ilm-policy", - "slm-history-ilm-policy", - "watch-history-ilm-policy", - "watch-history-ilm-policy-16", - "ml-size-based-ilm-policy", - "logs", - "metrics", - "synthetics", - "7-days-default", - "30-days-default", - "90-days-default", - "180-days-default", - "365-days-default", - ".fleet-actions-results-ilm-policy", - ".fleet-file-data-ilm-policy", - ".fleet-files-ilm-policy", - ".deprecation-indexing-ilm-policy", - ".monitoring-8-ilm-policy", - "behavioral_analytics-events-default_policy", - ] - const policies = await client.ilm.getLifecycle() - for (const policy in policies) { - if (preserveIlmPolicies.includes(policy)) continue - await client.ilm.deleteLifecycle({ name: policy }) - } - - // delete autofollow patterns - const patterns = await client.ccr.getAutoFollowPattern() - for (const { name } of patterns.patterns) { - await client.ccr.deleteAutoFollowPattern({ name }) - } - - // delete all tasks - const nodesTask = await client.tasks.list() - const tasks = Object.keys(nodesTask.nodes) - .reduce((acc, node) => { - const { tasks } = nodesTask.nodes[node] - Object.keys(tasks).forEach(id => { - if (tasks[id].cancellable) acc.push(id) - }) - return acc - }, []) - - await helper.runInParallel( - client, 'tasks.cancel', - tasks.map(id => ({ task_id: id })) - ) - - // cleanup ml - const jobsList = await client.ml.getJobs() - const jobsIds = jobsList.jobs.map(j => j.job_id) - await helper.runInParallel( - client, 'ml.deleteJob', - jobsIds.map(j => ({ job_id: j, force: true })) - ) - - const dataFrame = await client.ml.getDataFrameAnalytics() - const dataFrameIds = dataFrame.data_frame_analytics.map(d => d.id) - await helper.runInParallel( - client, 'ml.deleteDataFrameAnalytics', - dataFrameIds.map(d => ({ id: d, force: true })) - ) - - const calendars = await client.ml.getCalendars() - const calendarsId = calendars.calendars.map(c => c.calendar_id) - await helper.runInParallel( - client, 'ml.deleteCalendar', - calendarsId.map(c => ({ calendar_id: c })) - ) - - const training = await client.ml.getTrainedModels() - const trainingId = training.trained_model_configs - .filter(t => t.created_by !== '_xpack') - .map(t => t.model_id) - await helper.runInParallel( - client, 'ml.deleteTrainedModel', - trainingId.map(t => ({ model_id: t, force: true })) - ) - - // cleanup transforms - const transforms = await client.transform.getTransform() - const transformsId = 
transforms.transforms.map(t => t.id) - await helper.runInParallel( - client, 'transform.deleteTransform', - transformsId.map(t => ({ transform_id: t, force: true })) - ) - } - - const shutdownNodes = await client.shutdown.getNode() - if (shutdownNodes._nodes == null && shutdownNodes.cluster_name == null) { - for (const node of shutdownNodes.nodes) { - await client.shutdown.deleteNode({ node_id: node.node_id }) - } - } - - // wait for pending task before resolving the promise - await sleep(100) - while (true) { - const body = await client.cluster.pendingTasks() - if (body.tasks.length === 0) break - await sleep(500) - } - } - - /** - * Runs the given test. - * It runs the test components in the following order: - * - skip check - * - xpack user - * - setup - * - the actual test - * - teardown - * - xpack cleanup - * - cleanup - * @param {object} setup (null if not needed) - * @param {object} test - * @param {object} teardown (null if not needed) - * @returns {Promise} - */ - async function run (setup, test, teardown, stats, junit) { - // if we should skip a feature in the setup/teardown section - // we should skip the entire test file - const skip = getSkip(setup) || getSkip(teardown) - if (skip && shouldSkip(esVersion, skip)) { - junit.skip(skip) - logSkip(skip) - return - } - - if (isXPack) { - // Some xpack test requires this user - // tap.comment('Creating x-pack user') - try { - await client.security.putUser({ - username: 'x_pack_rest_user', - password: 'x-pack-test-password', - roles: ['superuser'] - }) - } catch (err) { - assert.ifError(err, 'should not error: security.putUser') - } - } - - if (setup) await exec('Setup', setup, stats, junit) - - await exec('Test', test, stats, junit) - - if (teardown) await exec('Teardown', teardown, stats, junit) - - await cleanup(isXPack) - } - - /** - * Fill the stashed values of a command - * let's say the we have stashed the `master` value, - * is_true: nodes.$master.transport.profiles - * becomes - * is_true: nodes.new_value.transport.profiles - * @param {object|string} the action to update - * @returns {object|string} the updated action - */ - function fillStashedValues (obj) { - if (typeof obj === 'string') { - return getStashedValues(obj) - } - // iterate every key of the object - for (const key in obj) { - const val = obj[key] - // if the key value is a string, and the string includes '${' - // that we must update the content of '${...}'. 
- // eg: 'Basic ${auth}' we search the stahed value 'auth' - // and the resulting value will be 'Basic valueOfAuth' - if (typeof val === 'string' && val.includes('${')) { - while (obj[key].includes('${')) { - const val = obj[key] - const start = val.indexOf('${') - const end = val.indexOf('}', val.indexOf('${')) - const stashedKey = val.slice(start + 2, end) - const stashed = stash.get(stashedKey) - obj[key] = val.slice(0, start) + stashed + val.slice(end + 1) - } - continue - } - // handle json strings, eg: '{"hello":"$world"}' - if (typeof val === 'string' && val.includes('"$')) { - while (obj[key].includes('"$')) { - const val = obj[key] - const start = val.indexOf('"$') - const end = val.indexOf('"', start + 1) - const stashedKey = val.slice(start + 2, end) - const stashed = '"' + stash.get(stashedKey) + '"' - obj[key] = val.slice(0, start) + stashed + val.slice(end + 1) - } - continue - } - // if the key value is a string, and the string includes '$' - // we run the "update value" code - if (typeof val === 'string' && val.includes('$')) { - // update the key value - obj[key] = getStashedValues(val) - continue - } - - // go deep in the object - if (val !== null && typeof val === 'object') { - fillStashedValues(val) - } - } - - return obj - - function getStashedValues (str) { - const arr = str - // we split the string on the dots - // handle the key with a dot inside that is not a part of the path - .split(/(? { - if (part[0] === '$') { - const stashed = stash.get(part.slice(1)) - if (stashed == null) { - throw new Error(`Cannot find stashed value '${part}' for '${JSON.stringify(obj)}'`) - } - return stashed - } - return part - }) - - // recreate the string value only if the array length is higher than one - // otherwise return the first element which in some test this could be a number, - // and call `.join` will coerce it to a string. - return arr.length > 1 ? arr.join('.') : arr[0] - } - } - - /** - * Stashes a value - * @param {string} the key to search in the previous response - * @param {string} the name to identify the stashed value - * @returns {TestRunner} - */ - function set (key, name) { - if (key.includes('_arbitrary_key_')) { - let currentVisit = null - for (const path of key.split('.')) { - if (path === '_arbitrary_key_') { - const keys = Object.keys(currentVisit) - const arbitraryKey = keys[getRandomInt(0, keys.length)] - stash.set(name, arbitraryKey) - } else { - currentVisit = delve(response, path) - } - } - } else { - stash.set(name, delve(response, key)) - } - } - - /** - * Applies a given transformation and stashes the result. 
- * @param {string} the name to identify the stashed value - * @param {string} the transformation function as string - * @returns {TestRunner} - */ - function transform_and_set (name, transform) { - if (/base64EncodeCredentials/.test(transform)) { - const [user, password] = transform - .slice(transform.indexOf('(') + 1, -1) - .replace(/ /g, '') - .split(',') - const userAndPassword = `${delve(response, user)}:${delve(response, password)}` - stash.set(name, Buffer.from(userAndPassword).toString('base64')) - } else { - throw new Error(`Unknown transform: '${transform}'`) - } - } - - /** - * Runs a client command - * @param {object} the action to perform - * @returns {Promise} - */ - async function doAction (action, stats) { - const cmd = await updateParams(parseDo(action)) - let api - try { - api = delve(client, cmd.method).bind(client) - } catch (err) { - console.error(`\nError: Cannot find the method '${cmd.method}' in the client.\n`) - process.exit(1) - } - - if (action.headers) { - switch (action.headers['Content-Type'] || action.headers['content-type']) { - case 'application/json': - delete action.headers['Content-Type'] - delete action.headers['content-type'] - action.headers['Content-Type'] = `application/vnd.elasticsearch+json; compatible-with=${packageJson.version.split('.')[0]}` - break - case 'application/x-ndjson': - delete action.headers['Content-Type'] - delete action.headers['content-type'] - action.headers['Content-Type'] = `application/vnd.elasticsearch+x-ndjson; compatible-with=${packageJson.version.split('.')[0]}` - break - } - } - - const options = { ignore: cmd.params.ignore, headers: action.headers, meta: true } - if (!Array.isArray(options.ignore)) options.ignore = [options.ignore] - if (cmd.params.ignore) delete cmd.params.ignore - - // ndjson apis should always send the body as an array - if (isNDJson(cmd.api) && !Array.isArray(cmd.params.body)) { - cmd.params.body = [cmd.params.body] - } - - if (typeof cmd.params.body === 'string' && !isNDJson(cmd.api)) { - cmd.params.body = JSON.parse(cmd.params.body) - } - - let err, result; - try { - [err, result] = await to(api(cmd.params, options)) - } catch (exc) { - if (JSON.stringify(exc).includes('resource_already_exists_exception')) { - console.warn(`Resource already exists: ${JSON.stringify(cmd.params)}`) - // setup task was already done because cleanup didn't catch it? do nothing - } else { - throw exc - } - } - let warnings = result ? result.warnings : null - const body = result ? 
result.body : null - - if (action.warnings && warnings === null) { - assert.fail('We should get a warning header', action.warnings) - } else if (!action.warnings && warnings !== null) { - // if there is only the 'default shard will change' - // warning we skip the check, because the yaml - // spec may not be updated - let hasDefaultShardsWarning = false - warnings.forEach(h => { - if (/default\snumber\sof\sshards/g.test(h)) { - hasDefaultShardsWarning = true - } - }) - - if (hasDefaultShardsWarning === true && warnings.length > 1) { - assert.fail('We are not expecting warnings', warnings) - } - } else if (action.warnings && warnings !== null) { - // if the yaml warnings do not contain the - // 'default shard will change' warning - // we do not check it presence in the warnings array - // because the yaml spec may not be updated - let hasDefaultShardsWarning = false - action.warnings.forEach(h => { - if (/default\snumber\sof\sshards/g.test(h)) { - hasDefaultShardsWarning = true - } - }) - - if (hasDefaultShardsWarning === false) { - warnings = warnings.filter(h => !h.test(/default\snumber\sof\sshards/g)) - } - - stats.assertions += 1 - assert.deepEqual(warnings, action.warnings) - } - - if (action.catch) { - stats.assertions += 1 - assert.ok(err, `Expecting an error, but instead got ${JSON.stringify(err)}, the response was ${JSON.stringify(result)}`) - assert.ok( - parseDoError(err, action.catch), - `the error should match: ${action.catch}, found ${JSON.stringify(err.body)}` - ) - try { - response = JSON.parse(err.body) - } catch (e) { - response = err.body - } - } else { - stats.assertions += 1 - assert.ifError(err, `should not error: ${cmd.method}`, action) - response = body - } - } - - /** - * Runs an actual test - * @param {string} the name of the test - * @param {object} the actions to perform - * @returns {Promise} - */ - async function exec (name, actions, stats, junit) { - // tap.comment(name) - for (const action of actions) { - if (action.skip) { - if (shouldSkip(esVersion, action.skip)) { - junit.skip(fillStashedValues(action.skip)) - logSkip(fillStashedValues(action.skip)) - break - } - } - - if (action.do) { - await doAction(fillStashedValues(action.do), stats) - } - - if (action.set) { - const key = Object.keys(action.set)[0] - set(fillStashedValues(key), action.set[key]) - } - - if (action.transform_and_set) { - const key = Object.keys(action.transform_and_set)[0] - transform_and_set(key, action.transform_and_set[key]) - } - - if (action.match) { - stats.assertions += 1 - const key = Object.keys(action.match)[0] - match( - // in some cases, the yaml refers to the body with an empty string - key.split('.')[0] === '$body' || key === '' - ? response - : delve(response, fillStashedValues(key)), - key.split('.')[0] === '$body' - ? 
action.match[key] - : fillStashedValues(action.match)[key], - action.match, - response - ) - } - - if (action.lt) { - stats.assertions += 1 - const key = Object.keys(action.lt)[0] - lt( - delve(response, fillStashedValues(key)), - fillStashedValues(action.lt)[key], - response - ) - } - - if (action.gt) { - stats.assertions += 1 - const key = Object.keys(action.gt)[0] - gt( - delve(response, fillStashedValues(key)), - fillStashedValues(action.gt)[key], - response - ) - } - - if (action.lte) { - stats.assertions += 1 - const key = Object.keys(action.lte)[0] - lte( - delve(response, fillStashedValues(key)), - fillStashedValues(action.lte)[key], - response - ) - } - - if (action.gte) { - stats.assertions += 1 - const key = Object.keys(action.gte)[0] - gte( - delve(response, fillStashedValues(key)), - fillStashedValues(action.gte)[key], - response - ) - } - - if (action.length) { - stats.assertions += 1 - const key = Object.keys(action.length)[0] - length( - key === '$body' || key === '' - ? response - : delve(response, fillStashedValues(key)), - key === '$body' - ? action.length[key] - : fillStashedValues(action.length)[key], - response - ) - } - - if (action.is_true) { - stats.assertions += 1 - const isTrue = fillStashedValues(action.is_true) - is_true( - delve(response, isTrue), - isTrue, - response - ) - } - - if (action.is_false) { - stats.assertions += 1 - const isFalse = fillStashedValues(action.is_false) - is_false( - delve(response, isFalse), - isFalse, - response - ) - } - } - } - - return { run } -} - -/** - * Asserts that the given value is truthy - * @param {any} the value to check - * @param {string} an optional message - * @param {any} debugging metadata to attach to any assertion errors - * @returns {TestRunner} - */ -function is_true (val, msg, response) { - try { - assert.ok((typeof val === 'string' && val.toLowerCase() === 'true') || val, `expect truthy value: ${msg} - value: ${JSON.stringify(val)}`) - } catch (err) { - err.response = JSON.stringify(response) - throw err - } -} - -/** - * Asserts that the given value is falsey - * @param {any} the value to check - * @param {string} an optional message - * @param {any} debugging metadata to attach to any assertion errors - * @returns {TestRunner} - */ -function is_false (val, msg, response) { - try { - assert.ok((typeof val === 'string' && val.toLowerCase() === 'false') || !val, `expect falsey value: ${msg} - value: ${JSON.stringify(val)}`) - } catch (err) { - err.response = JSON.stringify(response) - throw err - } -} - -/** - * Asserts that two values are the same - * @param {any} the first value - * @param {any} the second value - * @param {any} debugging metadata to attach to any assertion errors - * @returns {TestRunner} - */ -function match (val1, val2, action, response) { - try { - // both values are objects - if (typeof val1 === 'object' && typeof val2 === 'object') { - assert.deepEqual(val1, val2, typeof action === 'object' ? 
JSON.stringify(action) : action) - // the first value is the body as string and the second a pattern string - } else if ( - typeof val1 === 'string' && typeof val2 === 'string' && - val2.startsWith('/') && (val2.endsWith('/\n') || val2.endsWith('/')) - ) { - const regStr = val2 - .replace(/(^|[^\\])#.*/g, '$1') - .replace(/(^|[^\\])\s+/g, '$1') - .slice(1, -1) - // 'm' adds the support for multiline regex - assert.match(val1, new RegExp(regStr, 'm'), `should match pattern provided: ${val2}, but got: ${val1}: ${JSON.stringify(action)}`) - } else if (typeof val1 === 'string' && typeof val2 === 'string') { - // string comparison - assert.include(val1, val2, `should include pattern provided: ${val2}, but got: ${val1}: ${JSON.stringify(action)}`) - } else { - // everything else - assert.equal(val1, val2, `should be equal: ${val1} - ${val2}, action: ${JSON.stringify(action)}`) - } - } catch (err) { - err.response = JSON.stringify(response) - throw err - } -} - -/** - * Asserts that the first value is less than the second - * It also verifies that the two values are numbers - * @param {any} the first value - * @param {any} the second value - * @param {any} debugging metadata to attach to any assertion errors - * @returns {TestRunner} - */ -function lt (val1, val2, response) { - try { - ;[val1, val2] = getNumbers(val1, val2) - assert.ok(val1 < val2) - } catch (err) { - err.response = JSON.stringify(response) - throw err - } -} - -/** - * Asserts that the first value is greater than the second - * It also verifies that the two values are numbers - * @param {any} the first value - * @param {any} the second value - * @param {any} debugging metadata to attach to any assertion errors - * @returns {TestRunner} - */ -function gt (val1, val2, response) { - try { - ;[val1, val2] = getNumbers(val1, val2) - assert.ok(val1 > val2) - } catch (err) { - err.response = JSON.stringify(response) - throw err - } -} - -/** - * Asserts that the first value is less than or equal the second - * It also verifies that the two values are numbers - * @param {any} the first value - * @param {any} the second value - * @param {any} debugging metadata to attach to any assertion errors - * @returns {TestRunner} - */ -function lte (val1, val2, response) { - try { - ;[val1, val2] = getNumbers(val1, val2) - assert.ok(val1 <= val2) - } catch (err) { - err.response = JSON.stringify(response) - throw err - } -} - -/** - * Asserts that the first value is greater than or equal the second - * It also verifies that the two values are numbers - * @param {any} the first value - * @param {any} the second value - * @param {any} debugging metadata to attach to any assertion errors - * @returns {TestRunner} -*/ -function gte (val1, val2, response) { - try { - ;[val1, val2] = getNumbers(val1, val2) - assert.ok(val1 >= val2) - } catch (err) { - err.response = JSON.stringify(response) - throw err - } -} - -/** - * Asserts that the given value has the specified length - * @param {string|object|array} the object to check - * @param {number} the expected length - * @param {any} debugging metadata to attach to any assertion errors - * @returns {TestRunner} - */ -function length (val, len, response) { - try { - if (typeof val === 'string' || Array.isArray(val)) { - assert.equal(val.length, len) - } else if (typeof val === 'object' && val !== null) { - assert.equal(Object.keys(val).length, len) - } else { - assert.fail(`length: the given value is invalid: ${val}`) - } - } catch (err) { - err.response = JSON.stringify(response) - throw err - } -} - -/** 
- * Gets a `do` action object and returns a structured object,
- * where the action is the key and the parameter is the value.
- * Eg:
- *   {
- *     'indices.create': {
- *       'index': 'test'
- *     },
- *     'warnings': [
- *       '[index] is deprecated'
- *     ]
- *   }
- * becomes
- *   {
- *     method: 'indices.create',
- *     params: {
- *       index: 'test'
- *     },
- *     warnings: [
- *       '[index] is deprecated'
- *     ]
- *   }
- * @param {object}
- * @returns {object}
- */
-function parseDo (action) {
-  action = JSON.parse(JSON.stringify(action))
-
-  if (typeof action === 'string') action = {[action]: {}}
-  if (Array.isArray(action)) action = action[0]
-
-  return Object.keys(action).reduce((acc, val) => {
-    switch (val) {
-      case 'catch':
-        acc.catch = action.catch
-        break
-      case 'warnings':
-        acc.warnings = action.warnings
-        break
-      case 'node_selector':
-        acc.node_selector = action.node_selector
-        break
-      default:
-        // converts underscore to camelCase
-        // eg: put_mapping => putMapping
-        acc.method = val.replace(/_([a-z])/g, g => g[1].toUpperCase())
-        acc.api = val
-        acc.params = action[val] // camelify(action[val])
-        if (typeof acc.params.body === 'string') {
-          try {
-            acc.params.body = JSON.parse(acc.params.body)
-          } catch (err) {}
-        }
-    }
-    return acc
-  }, {})
-
-  // function camelify (obj) {
-  //   const newObj = {}
-
-  //   // TODO: add camelCase support for this fields
-  //   const doNotCamelify = ['copy_settings']
-
-  //   for (const key in obj) {
-  //     const val = obj[key]
-  //     let newKey = key
-  //     if (!~doNotCamelify.indexOf(key)) {
-  //       // if the key starts with `_` we should not camelify the first occurence
-  //       // eg: _source_include => _sourceInclude
-  //       newKey = key[0] === '_'
-  //         ? '_' + key.slice(1).replace(/_([a-z])/g, k => k[1].toUpperCase())
-  //         : key.replace(/_([a-z])/g, k => k[1].toUpperCase())
-  //     }
-
-  //     if (
-  //       val !== null &&
-  //       typeof val === 'object' &&
-  //       !Array.isArray(val) &&
-  //       key !== 'body'
-  //     ) {
-  //       newObj[newKey] = camelify(val)
-  //     } else {
-  //       newObj[newKey] = val
-  //     }
-  //   }
-
-  //   return newObj
-  // }
-}
-
-function parseDoError (err, spec) {
-  const httpErrors = {
-    bad_request: 400,
-    unauthorized: 401,
-    forbidden: 403,
-    missing: 404,
-    request_timeout: 408,
-    conflict: 409,
-    unavailable: 503
-  }
-
-  if (httpErrors[spec]) {
-    return err.statusCode === httpErrors[spec]
-  }
-
-  if (spec === 'request') {
-    return err.statusCode >= 400 && err.statusCode < 600
-  }
-
-  if (spec.startsWith('/') && spec.endsWith('/')) {
-    return new RegExp(spec.slice(1, -1), 'g').test(JSON.stringify(err.body))
-  }
-
-  if (spec === 'param') {
-    // the new client do not perform runtime checks,
-    // but it relies on typescript informing the user
-    return true
-    // return err instanceof ConfigurationError
-  }
-
-  return false
-}
-
-function getSkip (arr) {
-  if (!Array.isArray(arr)) return null
-  for (let i = 0; i < arr.length; i++) {
-    if (arr[i].skip) return arr[i].skip
-  }
-  return null
-}
-
-// Gets two *maybe* numbers and returns two valida numbers
-// it throws if one or both are not a valid number
-// the returned value is an array with the new values
-function getNumbers (val1, val2) {
-  const val1Numeric = Number(val1)
-  if (isNaN(val1Numeric)) {
-    throw new TypeError(`val1 is not a valid number: ${val1}`)
-  }
-  const val2Numeric = Number(val2)
-  if (isNaN(val2Numeric)) {
-    throw new TypeError(`val2 is not a valid number: ${val2}`)
-  }
-  return [val1Numeric, val2Numeric]
-}
-
-function getRandomInt (min, max) {
-  return Math.floor(Math.random() * (max - min)) + min
-}
-
-/**
- * Logs a skip
- * @param {object} the actions
- * @returns {TestRunner}
- */
-function logSkip (action) {
-  if (action.reason && action.version) {
-    console.log(`Skip: ${action.reason} (${action.version})`)
-  } else if (action.features) {
-    console.log(`Skip: ${JSON.stringify(action.features)})`)
-  } else {
-    console.log('Skipped')
-  }
-}
-
-/**
- * Decides if a test should be skipped
- * @param {object} the actions
- * @returns {boolean}
- */
-function shouldSkip (esVersion, action) {
-  let shouldSkip = false
-  // skip based on the version
-  if (action.version) {
-    if (action.version.trim() === 'all') return true
-    const versions = action.version.split(',').filter(Boolean)
-    for (const version of versions) {
-      const [min, max] = version.split('-').map(v => v.trim())
-      // if both `min` and `max` are specified
-      if (min && max) {
-        shouldSkip = semver.satisfies(esVersion, action.version)
-      // if only `min` is specified
-      } else if (min) {
-        shouldSkip = semver.gte(esVersion, min)
-      // if only `max` is specified
-      } else if (max) {
-        shouldSkip = semver.lte(esVersion, max)
-      // something went wrong!
-      } else {
-        throw new Error(`skip: Bad version range: ${action.version}`)
-      }
-    }
-  }
-
-  if (shouldSkip) return true
-
-  if (action.features) {
-    if (!Array.isArray(action.features)) action.features = [action.features]
-    // returns true if one of the features is not present in the supportedFeatures
-    shouldSkip = !!action.features.filter(f => !~supportedFeatures.indexOf(f)).length
-  }
-
-  if (shouldSkip) return true
-
-  return false
-}
-
-function isNDJson (api) {
-  const spec = require(join(locations.specFolder, `${api}.json`))
-  const { content_type } = spec[Object.keys(spec)[0]].headers
-  return Boolean(content_type && content_type.includes('application/x-ndjson'))
-}
-
-/**
- * Updates the array syntax of keys and values
- * eg: 'hits.hits.1.stuff' to 'hits.hits[1].stuff'
- * @param {object} the action to update
- * @returns {obj} the updated action
- */
-// function updateArraySyntax (obj) {
-//   const newObj = {}
-
-//   for (const key in obj) {
-//     const newKey = key.replace(/\.\d{1,}\./g, v => `[${v.slice(1, -1)}].`)
-//     const val = obj[key]
-
-//     if (typeof val === 'string') {
-//       newObj[newKey] = val.replace(/\.\d{1,}\./g, v => `[${v.slice(1, -1)}].`)
-//     } else if (val !== null && typeof val === 'object') {
-//       newObj[newKey] = updateArraySyntax(val)
-//     } else {
-//       newObj[newKey] = val
-//     }
-//   }
-
-//   return newObj
-// }
-
-module.exports = build
diff --git a/test/mock/index.js b/test/mock/index.js
index d9525299d..6d6452995 100644
--- a/test/mock/index.js
+++ b/test/mock/index.js
@@ -1,20 +1,6 @@
 /*
- * Licensed to Elasticsearch B.V. under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch B.V. licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
+ * Copyright Elasticsearch B.V. and contributors
+ * SPDX-License-Identifier: Apache-2.0
 */

 'use strict'
diff --git a/test/unit/api.test.ts b/test/unit/api.test.ts
index a0f513256..452f53805 100644
--- a/test/unit/api.test.ts
+++ b/test/unit/api.test.ts
@@ -1,20 +1,6 @@
 /*
- * Licensed to Elasticsearch B.V. under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch B.V. licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
+ * Copyright Elasticsearch B.V. and contributors
+ * SPDX-License-Identifier: Apache-2.0
 */

 import { test } from 'tap'
diff --git a/test/unit/client.test.ts b/test/unit/client.test.ts
index 2e64e5927..7c4aa3339 100644
--- a/test/unit/client.test.ts
+++ b/test/unit/client.test.ts
@@ -1,20 +1,6 @@
 /*
- * Licensed to Elasticsearch B.V. under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch B.V. licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
+ * Copyright Elasticsearch B.V. and contributors
+ * SPDX-License-Identifier: Apache-2.0
 */

 import * as http from 'node:http'
@@ -23,7 +9,7 @@ import { setTimeout } from 'node:timers/promises'
 import { test } from 'tap'
 import FakeTimers from '@sinonjs/fake-timers'
 import { buildServer, connection } from '../utils'
-import { Client, errors } from '../..'
+import { Client, errors, SniffingTransport } from '../..'
 import * as symbols from '@elastic/transport/lib/symbols'
 import { BaseConnectionPool, CloudConnectionPool, WeightedConnectionPool, HttpConnection } from '@elastic/transport'
@@ -301,9 +287,25 @@ test('Elastic Cloud config', t => {
   t.equal(connection?.url.hostname, 'abcd.localhost')
   t.equal(connection?.url.protocol, 'https:')

+  t.test('Invalid Cloud ID will throw ConfigurationError', t => {
+    t.throws(() => new Client({
+      cloud : {
+        id : 'invalidCloudIdThatIsNotBase64'
+      },
+      auth : {
+        username: 'elastic',
+        password: 'changeme'
+      }
+
+    }), errors.ConfigurationError)
+    t.end()
+  })
+
   t.end()
 })

+
+
 test('Override default Elastic Cloud options', t => {
   const client = new Client({
     cloud: {
@@ -572,3 +574,68 @@ test('disablePrototypePoisoningProtection is true by default', async t => {
     constructorAction: 'ignore'
   })
 })
+
+test('serverless defaults', t => {
+  t.test('uses CloudConnectionPool by default', t => {
+    const client = new Client({ node: 'http://localhost:9200/', serverMode: 'serverless' })
+    t.ok(client.connectionPool instanceof CloudConnectionPool)
+    t.equal(client.connectionPool.size, 1)
+    t.end()
+  })
+
+  t.test('selects one node if multiple are provided', t => {
+    const client = new Client({ nodes: ['http://localhost:9200/', 'http://localhost:9201/'], serverMode: 'serverless' })
+    t.equal(client.connectionPool.size, 1)
+    t.end()
+  })
+
+  t.test('uses TLSv1_2_method by default', t => {
+    const client = new Client({
+      node: 'https://localhost:9200/',
+      serverMode: 'serverless',
+      auth: {
+        username: 'elastic',
+        password: 'changeme'
+      }
+    })
+
+    const connection = client.connectionPool.connections.find(c => c.id === 'https://localhost:9200/')
+
+    t.equal(connection?.headers?.authorization, `Basic ${Buffer.from('elastic:changeme').toString('base64')}`)
+    t.same(connection?.tls, { secureProtocol: 'TLSv1_2_method' })
+    t.equal(connection?.url.hostname, 'localhost')
+    t.equal(connection?.url.protocol, 'https:')
+
+    t.end()
+  })
+
+  t.test('elastic-api-version header exists on all requests', async t => {
+    t.plan(1)
+
+    const Connection = connection.buildMockConnection({
+      onRequest (opts) {
+        t.equal(opts.headers?.['elastic-api-version'], '2023-10-31')
+        return {
+          statusCode: 200,
+          body: { hello: 'world' }
+        }
+      }
+    })
+
+    const client = new Client({
+      node: 'http://localhost:9200/',
+      serverMode: 'serverless',
+      Connection,
+    })
+
+    await client.transport.request({ method: 'GET', path: '/' })
+  })
+
+  t.test('sniffing transport not used', t => {
+    const client = new Client({ node: 'http://localhost:9200/', serverMode: 'serverless' })
+    t.ok(!(client.transport instanceof SniffingTransport))
+    t.end()
+  })
+
+  t.end()
+})
diff --git a/test/unit/helpers/bulk.test.ts b/test/unit/helpers/bulk.test.ts
index d45d2d003..45487aaa4 100644
--- a/test/unit/helpers/bulk.test.ts
+++ b/test/unit/helpers/bulk.test.ts
@@ -1,20 +1,6 @@
 /*
- * Licensed to Elasticsearch B.V. under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch B.V. licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
+ * Copyright Elasticsearch B.V. and contributors
+ * SPDX-License-Identifier: Apache-2.0
 */

 import FakeTimers from '@sinonjs/fake-timers'
diff --git a/test/unit/helpers/esql.test.ts b/test/unit/helpers/esql.test.ts
index c91e3cb03..32f363674 100644
--- a/test/unit/helpers/esql.test.ts
+++ b/test/unit/helpers/esql.test.ts
@@ -1,20 +1,6 @@
 /*
- * Licensed to Elasticsearch B.V. under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch B.V. licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
+ * Copyright Elasticsearch B.V. and contributors
+ * SPDX-License-Identifier: Apache-2.0
 */

 import { test } from 'tap'
@@ -172,17 +158,28 @@ test('ES|QL helper', t => {
     t.end()
   })

-  test('toArrowReader', t => {
-    t.test('Parses a binary response into an Arrow stream reader', async t => {
-      const binaryContent = '/////zABAAAQAAAAAAAKAA4ABgANAAgACgAAAAAABAAQAAAAAAEKAAwAAAAIAAQACgAAAAgAAAAIAAAAAAAAAAIAAAB8AAAABAAAAJ7///8UAAAARAAAAEQAAAAAAAoBRAAAAAEAAAAEAAAAjP///wgAAAAQAAAABAAAAGRhdGUAAAAADAAAAGVsYXN0aWM6dHlwZQAAAAAAAAAAgv///wAAAQAEAAAAZGF0ZQAAEgAYABQAEwASAAwAAAAIAAQAEgAAABQAAABMAAAAVAAAAAAAAwFUAAAAAQAAAAwAAAAIAAwACAAEAAgAAAAIAAAAEAAAAAYAAABkb3VibGUAAAwAAABlbGFzdGljOnR5cGUAAAAAAAAAAAAABgAIAAYABgAAAAAAAgAGAAAAYW1vdW50AAAAAAAA/////7gAAAAUAAAAAAAAAAwAFgAOABUAEAAEAAwAAABgAAAAAAAAAAAABAAQAAAAAAMKABgADAAIAAQACgAAABQAAABYAAAABQAAAAAAAAAAAAAABAAAAAAAAAAAAAAAAQAAAAAAAAAIAAAAAAAAACgAAAAAAAAAMAAAAAAAAAABAAAAAAAAADgAAAAAAAAAKAAAAAAAAAAAAAAAAgAAAAUAAAAAAAAAAAAAAAAAAAAFAAAAAAAAAAAAAAAAAAAAHwAAAAAAAAAAAACgmZkTQAAAAGBmZiBAAAAAAAAAL0AAAADAzMwjQAAAAMDMzCtAHwAAAAAAAADV6yywkgEAANWPBquSAQAA1TPgpZIBAADV17mgkgEAANV7k5uSAQAA/////wAAAAA='
+  test('toArrowReader', async t => {
+    const testRecords = [
+      { amount: 4.900000095367432, },
+      { amount: 8.199999809265137, },
+      { amount: 15.5, },
+      { amount: 9.899999618530273, },
+      { amount: 13.899999618530273, },
+    ]
+    // build reusable Arrow table
+    const table = arrow.tableFromJSON(testRecords)
+    const rawData = await arrow.RecordBatchStreamWriter.writeAll(table).toUint8Array()
+
+    t.test('Parses a binary response into an Arrow stream reader', async t => {
       const MockConnection = connection.buildMockConnection({
         onRequest (_params) {
           return {
-            body: Buffer.from(binaryContent, 'base64'),
+            body: Buffer.from(rawData),
             statusCode: 200,
             headers: {
-              'content-type': 'application/vnd.elasticsearch+arrow+stream'
+              'content-type': 'application/vnd.elasticsearch+arrow+stream',
+              'transfer-encoding': 'chunked'
             }
           }
         }
@@ -196,26 +193,28 @@ test('ES|QL helper', t => {
       const result = await client.helpers.esql({ query: 'FROM sample_data' }).toArrowReader()
       t.ok(result.isStream())

-      const recordBatch = result.next().value
-      t.same(recordBatch.get(0)?.toJSON(), {
-        amount: 4.900000095367432,
-        date: 1729532586965,
-      })
+      let count = 0
+      for await (const recordBatch of result) {
+        for (const record of recordBatch) {
+          t.same(record.toJSON(), testRecords[count])
+          count++
+        }
+      }
+
       t.end()
     })

     t.test('ESQL helper uses correct x-elastic-client-meta helper value', async t => {
-      const binaryContent = '/////zABAAAQAAAAAAAKAA4ABgANAAgACgAAAAAABAAQAAAAAAEKAAwAAAAIAAQACgAAAAgAAAAIAAAAAAAAAAIAAAB8AAAABAAAAJ7///8UAAAARAAAAEQAAAAAAAoBRAAAAAEAAAAEAAAAjP///wgAAAAQAAAABAAAAGRhdGUAAAAADAAAAGVsYXN0aWM6dHlwZQAAAAAAAAAAgv///wAAAQAEAAAAZGF0ZQAAEgAYABQAEwASAAwAAAAIAAQAEgAAABQAAABMAAAAVAAAAAAAAwFUAAAAAQAAAAwAAAAIAAwACAAEAAgAAAAIAAAAEAAAAAYAAABkb3VibGUAAAwAAABlbGFzdGljOnR5cGUAAAAAAAAAAAAABgAIAAYABgAAAAAAAgAGAAAAYW1vdW50AAAAAAAA/////7gAAAAUAAAAAAAAAAwAFgAOABUAEAAEAAwAAABgAAAAAAAAAAAABAAQAAAAAAMKABgADAAIAAQACgAAABQAAABYAAAABQAAAAAAAAAAAAAABAAAAAAAAAAAAAAAAQAAAAAAAAAIAAAAAAAAACgAAAAAAAAAMAAAAAAAAAABAAAAAAAAADgAAAAAAAAAKAAAAAAAAAAAAAAAAgAAAAUAAAAAAAAAAAAAAAAAAAAFAAAAAAAAAAAAAAAAAAAAHwAAAAAAAAAAAACgmZkTQAAAAGBmZiBAAAAAAAAAL0AAAADAzMwjQAAAAMDMzCtAHwAAAAAAAADV6yywkgEAANWPBquSAQAA1TPgpZIBAADV17mgkgEAANV7k5uSAQAA/////wAAAAA='
-
       const MockConnection = connection.buildMockConnection({
         onRequest (params) {
           const header = params.headers?.['x-elastic-client-meta'] ?? ''
           t.ok(header.includes('h=qa'), `Client meta header does not include ESQL helper value: ${header}`)
           return {
-            body: Buffer.from(binaryContent, 'base64'),
+            body: Buffer.from(rawData),
             statusCode: 200,
             headers: {
-              'content-type': 'application/vnd.elasticsearch+arrow+stream'
+              'content-type': 'application/vnd.elasticsearch+arrow+stream',
+              'transfer-encoding': 'chunked'
             }
           }
         }
@@ -254,10 +253,12 @@ test('ES|QL helper', t => {
         new arrow.RecordBatch(schema, batch3.data),
       ])

+      const rawData = await arrow.RecordBatchStreamWriter.writeAll(table).toUint8Array()
+
       const MockConnection = connection.buildMockConnection({
         onRequest (_params) {
           return {
-            body: Buffer.from(arrow.tableToIPC(table, "stream")),
+            body: Buffer.from(rawData),
             statusCode: 200,
             headers: {
               'content-type': 'application/vnd.elasticsearch+arrow+stream'
@@ -275,7 +276,7 @@ test('ES|QL helper', t => {
       t.ok(result.isStream())

       let counter = 0
-      for (const batch of result) {
+      for await (const batch of result) {
         for (const row of batch) {
           counter++
           const { id, val } = row.toJSON()
diff --git a/test/unit/helpers/msearch.test.ts b/test/unit/helpers/msearch.test.ts
index ba2457587..a87d86c04 100644
--- a/test/unit/helpers/msearch.test.ts
+++ b/test/unit/helpers/msearch.test.ts
@@ -1,20 +1,6 @@
 /*
- * Licensed to Elasticsearch B.V. under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch B.V. licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
+ * Copyright Elasticsearch B.V. and contributors
+ * SPDX-License-Identifier: Apache-2.0
 */

 import { test } from 'tap'
diff --git a/test/unit/helpers/scroll.test.ts b/test/unit/helpers/scroll.test.ts
index 88361bd7c..ae01989a5 100644
--- a/test/unit/helpers/scroll.test.ts
+++ b/test/unit/helpers/scroll.test.ts
@@ -1,20 +1,6 @@
 /*
- * Licensed to Elasticsearch B.V. under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch B.V. licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
+ * Copyright Elasticsearch B.V. and contributors
+ * SPDX-License-Identifier: Apache-2.0
 */

 import { test } from 'tap'
diff --git a/test/unit/helpers/search.test.ts b/test/unit/helpers/search.test.ts
index e318571a8..8eddde16a 100644
--- a/test/unit/helpers/search.test.ts
+++ b/test/unit/helpers/search.test.ts
@@ -1,20 +1,6 @@
 /*
- * Licensed to Elasticsearch B.V. under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch B.V. licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
+ * Copyright Elasticsearch B.V. and contributors
+ * SPDX-License-Identifier: Apache-2.0
 */

 import { test } from 'tap'
@@ -109,4 +95,3 @@ test('Merge filter paths (snake_case)', async t => {
     { _id: '3', three: 'three' }
   ])
 })
-
diff --git a/test/utils/MockConnection.ts b/test/utils/MockConnection.ts
index 19af3dd54..c1bd25873 100644
--- a/test/utils/MockConnection.ts
+++ b/test/utils/MockConnection.ts
@@ -1,20 +1,6 @@
 /*
- * Licensed to Elasticsearch B.V. under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch B.V. licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
+ * Copyright Elasticsearch B.V. and contributors
+ * SPDX-License-Identifier: Apache-2.0
 */

 import assert from 'assert'
diff --git a/test/utils/buildCluster.ts b/test/utils/buildCluster.ts
index 79a8ba71b..5b101f757 100644
--- a/test/utils/buildCluster.ts
+++ b/test/utils/buildCluster.ts
@@ -1,20 +1,6 @@
 /*
- * Licensed to Elasticsearch B.V. under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch B.V. licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
+ * Copyright Elasticsearch B.V. and contributors
+ * SPDX-License-Identifier: Apache-2.0
 */

 import Debug from 'debug'
diff --git a/test/utils/buildProxy.ts b/test/utils/buildProxy.ts
index 37f58d55c..314a08c47 100644
--- a/test/utils/buildProxy.ts
+++ b/test/utils/buildProxy.ts
@@ -1,20 +1,6 @@
 /*
- * Licensed to Elasticsearch B.V. under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch B.V. licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
+ * Copyright Elasticsearch B.V. and contributors
+ * SPDX-License-Identifier: Apache-2.0
 */

 // @ts-ignore
diff --git a/test/utils/buildServer.ts b/test/utils/buildServer.ts
index 586f1b68f..c2fcfc065 100644
--- a/test/utils/buildServer.ts
+++ b/test/utils/buildServer.ts
@@ -1,20 +1,6 @@
 /*
- * Licensed to Elasticsearch B.V. under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch B.V. licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
+ * Copyright Elasticsearch B.V. and contributors
+ * SPDX-License-Identifier: Apache-2.0
 */

 import { readFileSync } from 'fs'
diff --git a/test/utils/index.ts b/test/utils/index.ts
index 62d5cc578..6b74fa033 100644
--- a/test/utils/index.ts
+++ b/test/utils/index.ts
@@ -1,26 +1,12 @@
 /*
- * Licensed to Elasticsearch B.V. under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch B.V. licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
+ * Copyright Elasticsearch B.V. and contributors
+ * SPDX-License-Identifier: Apache-2.0
 */

 import buildServer from './buildServer'
 import * as connection from './MockConnection'
 import buildCluster from './buildCluster'
-import * as buildProxy from './buildProxy'
+import * as buildProxy from './buildProxy'

 export {
   buildServer,